feature/implement wss routes (#1196)

* feat(database): create schema for hway and motr

* fix(gateway): correct naming inconsistencies in handlers

* build: update schema file to be compatible with postgresql syntax

* fix: update schema to be compatible with PostgreSQL syntax

* chore: update query_hway.sql to follow sqlc syntax

* refactor: update query_hway.sql for PostgreSQL and sqlc

* feat: add vaults table to store encrypted data

* refactor: Update vaults table schema for sqlc compatibility

* chore(deps): Upgrade dependencies and add pgx/v5

* refactor(Makefile): move sqlc generate to internal/models

* docs(foundations): remove outdated pages

* chore(build): add Taskfile for build tasks

* refactor(embed): move embed files to internal package

* docs: add documentation for Cosmos SDK ORM
Prad Nukala 2024-12-18 15:53:45 -05:00 committed by GitHub
parent fc001216a8
commit 6072f6ecfa
111 changed files with 4919 additions and 8584 deletions


@ -5,4 +5,4 @@ version_scheme = "semver"
version = "0.5.27"
update_changelog_on_bump = true
major_version_zero = true
changelog_file = "./docs/docs/changelog/index.md"
changelog_file = "./docs/docs/changelog.md"

.gitignore (vendored)

@ -102,3 +102,4 @@ tools-stamp
sonr.log
interchaintest-downloader
.haptic


@ -3,11 +3,17 @@ version: 2
project_name: sonr
builds:
- id: motr
main: ./cmd/motr
binary: app
goos:
- js
goarch:
- wasm
- id: sonr
main: ./cmd/sonrd
binary: sonrd
builder: go
gobinary: go
mod_timestamp: "{{ .CommitTimestamp }}"
goos:
- linux
@ -33,8 +39,6 @@ builds:
- id: hway
main: ./cmd/hway
binary: hway
builder: go
gobinary: go
goos:
- linux
- darwin


@ -310,12 +310,11 @@ sh-testnet: mod-tidy
gen-pkl: init-env
pkl-gen-go pkl/sonr.orm/UCAN.pkl
pkl-gen-go pkl/sonr.orm/Models.pkl
pkl-gen-go pkl/sonr.net/Hway.pkl
pkl-gen-go pkl/sonr.net/Motr.pkl
gen-sqlc: init-env
@cd internal/database && sqlc generate
@cd internal/models && sqlc generate
gen-templ: init-env
@templ generate

Taskfile.yml (new file)

@ -0,0 +1,38 @@
version: '3'
vars:
VERSION:
sh: git describe --tags --abbrev=0
COMMIT:
sh: git rev-parse --short HEAD
ROOT_DIR:
sh: git rev-parse --show-toplevel
tasks:
default:
cmds:
- echo "{{.VERSION}}"
- echo "{{.COMMIT}}"
- echo "{{.ROOT_DIR}}"
silent: true
build:
silent: true
cmds:
- task: build:motr
- task: build:sonr
- task: build:hway
build:motr:
internal: true
silent: true
cmd: goreleaser build --snapshot --id motr --clean -o ./static/wasm/app.wasm
build:sonr:
internal: true
silent: true
cmd: goreleaser build --snapshot --id sonr --single-target --clean -o ./build/sonrd
build:hway:
internal: true
silent: true
cmd: goreleaser build --snapshot --id hway --single-target --clean -o ./build/hway

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -20,9 +20,7 @@ const _ = grpc.SupportPackageIsVersion9
const (
Query_Params_FullMethodName = "/dwn.v1.Query/Params"
Query_Schema_FullMethodName = "/dwn.v1.Query/Schema"
Query_Allocate_FullMethodName = "/dwn.v1.Query/Allocate"
Query_Sync_FullMethodName = "/dwn.v1.Query/Sync"
)
// QueryClient is the client API for Query service.
@ -33,15 +31,9 @@ const (
type QueryClient interface {
// Params queries all parameters of the module.
Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error)
// Schema queries the DID document by its id. And returns the required PKL
// information
Schema(ctx context.Context, in *QuerySchemaRequest, opts ...grpc.CallOption) (*QuerySchemaResponse, error)
// Allocate initializes a Target Vault available for claims with a compatible
// Authentication mechanism. The default authentication mechanism is WebAuthn.
Allocate(ctx context.Context, in *QueryAllocateRequest, opts ...grpc.CallOption) (*QueryAllocateResponse, error)
// Sync queries the DID document by its id. And returns the required PKL
// information
Sync(ctx context.Context, in *QuerySyncRequest, opts ...grpc.CallOption) (*QuerySyncResponse, error)
}
type queryClient struct {
@ -62,16 +54,6 @@ func (c *queryClient) Params(ctx context.Context, in *QueryParamsRequest, opts .
return out, nil
}
func (c *queryClient) Schema(ctx context.Context, in *QuerySchemaRequest, opts ...grpc.CallOption) (*QuerySchemaResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(QuerySchemaResponse)
err := c.cc.Invoke(ctx, Query_Schema_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *queryClient) Allocate(ctx context.Context, in *QueryAllocateRequest, opts ...grpc.CallOption) (*QueryAllocateResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(QueryAllocateResponse)
@ -82,16 +64,6 @@ func (c *queryClient) Allocate(ctx context.Context, in *QueryAllocateRequest, op
return out, nil
}
func (c *queryClient) Sync(ctx context.Context, in *QuerySyncRequest, opts ...grpc.CallOption) (*QuerySyncResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(QuerySyncResponse)
err := c.cc.Invoke(ctx, Query_Sync_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
// QueryServer is the server API for Query service.
// All implementations must embed UnimplementedQueryServer
// for forward compatibility.
@ -100,15 +72,9 @@ func (c *queryClient) Sync(ctx context.Context, in *QuerySyncRequest, opts ...gr
type QueryServer interface {
// Params queries all parameters of the module.
Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error)
// Schema queries the DID document by its id. And returns the required PKL
// information
Schema(context.Context, *QuerySchemaRequest) (*QuerySchemaResponse, error)
// Allocate initializes a Target Vault available for claims with a compatible
// Authentication mechanism. The default authentication mechanism is WebAuthn.
Allocate(context.Context, *QueryAllocateRequest) (*QueryAllocateResponse, error)
// Sync queries the DID document by its id. And returns the required PKL
// information
Sync(context.Context, *QuerySyncRequest) (*QuerySyncResponse, error)
mustEmbedUnimplementedQueryServer()
}
@ -122,15 +88,9 @@ type UnimplementedQueryServer struct{}
func (UnimplementedQueryServer) Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Params not implemented")
}
func (UnimplementedQueryServer) Schema(context.Context, *QuerySchemaRequest) (*QuerySchemaResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Schema not implemented")
}
func (UnimplementedQueryServer) Allocate(context.Context, *QueryAllocateRequest) (*QueryAllocateResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Allocate not implemented")
}
func (UnimplementedQueryServer) Sync(context.Context, *QuerySyncRequest) (*QuerySyncResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Sync not implemented")
}
func (UnimplementedQueryServer) mustEmbedUnimplementedQueryServer() {}
func (UnimplementedQueryServer) testEmbeddedByValue() {}
@ -170,24 +130,6 @@ func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interf
return interceptor(ctx, in, info, handler)
}
func _Query_Schema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(QuerySchemaRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(QueryServer).Schema(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Query_Schema_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(QueryServer).Schema(ctx, req.(*QuerySchemaRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Query_Allocate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(QueryAllocateRequest)
if err := dec(in); err != nil {
@ -206,24 +148,6 @@ func _Query_Allocate_Handler(srv interface{}, ctx context.Context, dec func(inte
return interceptor(ctx, in, info, handler)
}
func _Query_Sync_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(QuerySyncRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(QueryServer).Sync(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Query_Sync_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(QueryServer).Sync(ctx, req.(*QuerySyncRequest))
}
return interceptor(ctx, in, info, handler)
}
// Query_ServiceDesc is the grpc.ServiceDesc for Query service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
@ -235,18 +159,10 @@ var Query_ServiceDesc = grpc.ServiceDesc{
MethodName: "Params",
Handler: _Query_Params_Handler,
},
{
MethodName: "Schema",
Handler: _Query_Schema_Handler,
},
{
MethodName: "Allocate",
Handler: _Query_Allocate_Handler,
},
{
MethodName: "Sync",
Handler: _Query_Sync_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "dwn/v1/query.proto",


@ -39,7 +39,11 @@ func rootCmd() *cobra.Command {
if err != nil {
panic(err)
}
e, err := gateway.New(env, ipc)
dbq, err := setupPostgresDB()
if err != nil {
panic(err)
}
e, err := gateway.New(env, ipc, dbq)
if err != nil {
panic(err)
}
@ -56,10 +60,10 @@ func rootCmd() *cobra.Command {
cmd.Flags().StringVar(&sonrAPIURL, "sonr-api-url", "localhost:1317", "Sonr API URL")
cmd.Flags().StringVar(&sonrGrpcURL, "sonr-grpc-url", "localhost:9090", "Sonr gRPC URL")
cmd.Flags().StringVar(&sonrRPCURL, "sonr-rpc-url", "localhost:26657", "Sonr RPC URL")
cmd.Flags().StringVar(&psqlHost, "psql-host", "", "PostgreSQL Host")
cmd.Flags().StringVar(&psqlUser, "psql-user", "", "PostgreSQL User")
cmd.Flags().StringVar(&psqlPass, "psql-pass", "", "PostgreSQL Password")
cmd.Flags().StringVar(&psqlDB, "psql-db", "", "PostgreSQL Database")
cmd.Flags().StringVar(&psqlHost, "psql-host", "localhost", "PostgreSQL Host")
cmd.Flags().StringVar(&psqlUser, "psql-user", "postgres", "PostgreSQL User")
cmd.Flags().StringVar(&psqlPass, "psql-pass", "postgres", "PostgreSQL Password")
cmd.Flags().StringVar(&psqlDB, "psql-db", "highway", "PostgreSQL Database")
return cmd
}


@ -1,11 +1,14 @@
package main
import (
"context"
_ "embed"
"fmt"
"os"
"github.com/jackc/pgx/v5"
config "github.com/onsonr/sonr/internal/config/hway"
"github.com/onsonr/sonr/internal/models/drivers/hwayorm"
)
// main is the entry point for the application
@ -35,3 +38,12 @@ func loadEnvImplFromArgs(args []string) (config.Hway, error) {
}
return env, nil
}
func setupPostgresDB() (*hwayorm.Queries, error) {
pgdsn := fmt.Sprintf("host=%s user=%s password=%s dbname=%s sslmode=disable", psqlHost, psqlUser, psqlPass, psqlDB)
conn, err := pgx.Connect(context.Background(), pgdsn)
if err != nil {
return nil, err
}
return hwayorm.New(conn), nil
}


@ -4,13 +4,18 @@
package main
import (
"context"
"database/sql"
"encoding/json"
"syscall/js"
"github.com/labstack/echo/v4"
"github.com/onsonr/sonr/pkg/vault/routes"
_ "github.com/ncruces/go-sqlite3/driver"
_ "github.com/ncruces/go-sqlite3/embed"
"github.com/onsonr/sonr/cmd/motr/wasm"
"github.com/onsonr/sonr/internal/config/motr"
sink "github.com/onsonr/sonr/internal/models/sink/sqlite"
vault "github.com/onsonr/sonr/pkg/vault/routes"
)
var (
@ -52,3 +57,17 @@ func main() {
vault.RegisterRoutes(e, config)
wasm.ServeFetch(e)
}
// NewDB initializes and returns a configured database connection
func NewDB() (*sql.DB, error) {
db, err := sql.Open("sqlite3", ":memory:")
if err != nil {
return nil, err
}
// create tables
if _, err := db.ExecContext(context.Background(), sink.SchemaMotrSQL); err != nil {
return nil, err
}
return db, nil
}


@ -1,11 +0,0 @@
<div style="text-align: center;">
# Page Not Found
![A UFO takes one of the little worker monsters](/assets/images/undraw-taken.svg)
The page you were looking for couldn't be found.
Press [[/]] to search, or [head back to the homepage](/).
</div>


@ -1 +0,0 @@
# Sonr Blockchain


@ -1,11 +0,0 @@
<div style="text-align: center;">
# Page Not Found
![A UFO takes one of the little worker monsters](/assets/images/undraw-taken.svg)
The page you were looking for couldn't be found.
Press [[/]] to search, or [head back to the homepage](/).
</div>


@ -0,0 +1,569 @@
# Protocol Buffers in Cosmos SDK
## Overview
The Cosmos SDK uses Protocol Buffers for serialization and API definitions. Generation is handled via a Docker image: `ghcr.io/cosmos/proto-builder:0.15.x`.
## Generation Tools
- **Buf**: Primary tool for protobuf management
- **protocgen.sh**: Core generation script in `scripts/`
- **Makefile Commands**: Standard commands for generate, lint, format
## Key Components
### Buf Configuration
1. **Workspace Setup**
- Root level buf workspace configuration
- Manages multiple protobuf directories
2. **Directory Structure**
```
proto/
├── buf.gen.gogo.yaml # GoGo Protobuf generation
├── buf.gen.pulsar.yaml # Pulsar API generation
├── buf.gen.swagger.yaml # OpenAPI/Swagger docs
├── buf.lock # Dependencies
├── buf.yaml # Core configuration
├── cosmos/ # Core protos
└── tendermint/ # Consensus protos
```
3. **Module Protos**
- Located in `x/{moduleName}/proto`
- Module-specific message definitions
#### `buf.gen.gogo.yaml`
`buf.gen.gogo.yaml` defines how the protobuf files should be generated for use within the module. This file uses [gogoproto](https://github.com/gogo/protobuf), a separate generator from the official Google go-proto generator that makes working with various objects more ergonomic and has more performant encode and decode steps.
```go reference
https://github.com/cosmos/cosmos-sdk/blob/main/proto/buf.gen.gogo.yaml#L1-L9
```
#### `buf.gen.pulsar.yaml`
`buf.gen.pulsar.yaml` defines how protobuf files should be generated using the [new golang apiv2 of protobuf](https://go.dev/blog/protobuf-apiv2). This generator is used instead of the google go-proto generator because it has some extra helpers for Cosmos SDK applications and will have more performant encode and decode than the google go-proto generator. You can follow the development of this generator [here](https://github.com/cosmos/cosmos-proto).
```go reference
https://github.com/cosmos/cosmos-sdk/blob/main/proto/buf.gen.pulsar.yaml#L1-L18
```
#### `buf.gen.swagger.yaml`
`buf.gen.swagger.yaml` generates the Swagger documentation for the chain's queries and messages. It only defines the REST API endpoints that were defined in the query and msg servers. You can find an example of this [here](https://github.com/cosmos/cosmos-sdk/blob/main/x/bank/proto/cosmos/bank/v1beta1/query.proto).
```go reference
https://github.com/cosmos/cosmos-sdk/blob/main/proto/buf.gen.swagger.yaml#L1-L6
```
#### `buf.lock`
This is an autogenerated file based on the dependencies required by the `.gen` files. There is no need to copy the current one. If you depend on cosmos-sdk proto definitions, a new entry for the Cosmos SDK will need to be provided. The dependency you will need to use is `buf.build/cosmos/cosmos-sdk`.
```go reference
https://github.com/cosmos/cosmos-sdk/blob/main/proto/buf.lock#L1-L16
```
#### `buf.yaml`
`buf.yaml` defines the [name of your package](https://github.com/cosmos/cosmos-sdk/blob/main/proto/buf.yaml#L3), which [breakage checker](https://buf.build/docs/tutorials/getting-started-with-buf-cli#detect-breaking-changes) to use and how to [lint your protobuf files](https://buf.build/docs/tutorials/getting-started-with-buf-cli#lint-your-api).
It is advised to use a tagged version of the buf modules corresponding to the version of the Cosmos SDK being used.
```go reference
https://github.com/cosmos/cosmos-sdk/blob/main/proto/buf.yaml#L1-L24
```
We use a variety of linters for the Cosmos SDK protobuf files. The repo also checks these in CI.
A reference to the GitHub Actions workflow can be found [here](https://github.com/cosmos/cosmos-sdk/blob/main/.github/workflows/proto.yml#L1-L32)
# ORM
The Cosmos SDK ORM is a state management library that provides a rich, but opinionated set of tools for managing a
module's state. It provides support for:
- type safe management of state
- multipart keys
- secondary indexes
- unique indexes
- easy prefix and range queries
- automatic genesis import/export
- automatic query services for clients, including support for light client proofs (still in development)
- indexing state data in external databases (still in development)
## Design and Philosophy
The ORM's data model is inspired by the relational data model found in SQL databases. The core abstraction is a table
with a primary key and optional secondary indexes.
Because the Cosmos SDK uses protobuf as its encoding layer, ORM tables are defined directly in .proto files using
protobuf options. Each table is defined by a single protobuf `message` type and a schema of multiple tables is
represented by a single .proto file.
Table structure is specified in the same file where messages are defined in order to make it easy to focus on better
design of the state layer. Because blockchain state layout is part of the public API for clients (TODO: link to docs on
light client proofs), it is important to think about the state layout as being part of the public API of a module.
Changing the state layout actually breaks clients, so it is ideal to think through it carefully up front and to aim for
a design that will eliminate or minimize breaking changes down the road. Also, good design of state enables building
more performant and sophisticated applications. Providing users with a set of tools inspired by relational databases
which have a long history of database design best practices and allowing schema to be specified declaratively in a
single place are design choices the ORM makes to enable better design and more durable APIs.
Also, by only supporting the table abstraction as opposed to key-value pair maps, it is easy to add new
columns/fields to any data structure without causing a breaking change, and the data structures can easily be indexed in
any off-the-shelf SQL database for more sophisticated queries.
The encoding of fields in keys is designed to support ordered iteration for all protobuf primitive field types
except for `bytes` as well as the well-known types `google.protobuf.Timestamp` and `google.protobuf.Duration`. Encodings
are optimized for storage space when it makes sense (see the documentation in `cosmos/orm/v1/orm.proto` for more details)
and table rows do not use extra storage space to store key fields in the value.
We recommend that users of the ORM attempt to follow database design best practices such as
[normalization](https://en.wikipedia.org/wiki/Database_normalization) (at least 1NF).
For instance, defining `repeated` fields in a table is considered an anti-pattern because it breaks first normal form (1NF).
Although we support `repeated` fields in tables, they cannot be used as key fields for this reason. This may seem
restrictive but years of best practice (and also experience in the SDK) have shown that following this pattern
leads to easier to maintain schemas.
To illustrate the motivation for these principles with an example from the SDK, historically balances were stored
as a mapping from account -> map of denom to amount. This did not scale well because an account with 100 token balances
needed to be encoded/decoded every time a single coin balance changed. Now balances are stored as account,denom -> amount
as in the example above. With the ORM's data model, if we wanted to add a new field to `Balance` such as
`unlocked_balance` (if vesting accounts were redesigned in this way), it would be easy to add it to this table without
requiring a data migration. Because of the ORM's optimizations, the account and denom are only stored in the key part
of storage and not in the value leading to both a flexible data model and efficient usage of storage.
## Defining Tables
To define a table:
1. create a .proto file to describe the module's state (naming it `state.proto` is recommended for consistency),
and import "cosmos/orm/v1/orm.proto", ex:
```protobuf
syntax = "proto3";
package bank_example;
import "cosmos/orm/v1/orm.proto";
```
2. define a `message` for the table, ex:
```protobuf
message Balance {
bytes account = 1;
string denom = 2;
uint64 balance = 3;
}
```
3. add the `cosmos.orm.v1.table` option to the table and give the table an `id` unique within this .proto file:
```protobuf
message Balance {
option (cosmos.orm.v1.table) = {
id: 1
};
bytes account = 1;
string denom = 2;
uint64 balance = 3;
}
```
4. define the primary key field or fields, as a comma-separated list of the fields from the message which should make
up the primary key:
```protobuf
message Balance {
option (cosmos.orm.v1.table) = {
id: 1
primary_key: { fields: "account,denom" }
};
bytes account = 1;
string denom = 2;
uint64 balance = 3;
}
```
5. add any desired secondary indexes by specifying an `id` unique within the table and a comma-separated list of the
index fields:
```protobuf
message Balance {
option (cosmos.orm.v1.table) = {
id: 1;
primary_key: { fields: "account,denom" }
index: { id: 1 fields: "denom" } // this allows querying for the accounts which own a denom
};
bytes account = 1;
string denom = 2;
uint64 amount = 3;
}
```
### Auto-incrementing Primary Keys
A common pattern in SDK modules and in database design is to define tables with a single integer `id` field with an
automatically generated primary key. In the ORM we can do this by setting the `auto_increment` option to `true` on the
primary key, ex:
```protobuf
message Account {
option (cosmos.orm.v1.table) = {
id: 2;
primary_key: { fields: "id", auto_increment: true }
};
uint64 id = 1;
bytes address = 2;
}
```
### Unique Indexes
A unique index can be added by setting the `unique` option to `true` on an index, ex:
```protobuf
message Account {
option (cosmos.orm.v1.table) = {
id: 2;
primary_key: { fields: "id", auto_increment: true }
index: {id: 1, fields: "address", unique: true}
};
uint64 id = 1;
bytes address = 2;
}
```
### Singletons
The ORM also supports a special type of table with only one row called a `singleton`. This can be used for storing
module parameters. Singletons only need to define a unique `id`, which cannot conflict with the ids of other
tables or singletons in the same .proto file. Ex:
```protobuf
message Params {
option (cosmos.orm.v1.singleton) = {
id: 3;
};
google.protobuf.Duration voting_period = 1;
uint64 min_threshold = 2;
}
```
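After running the code generator (covered in the next section), a singleton behaves like a one-row table. As a rough sketch, assuming the accessor names the codegen would produce for this schema (`StateStore`, `ParamsTable` with `Get`/`Save` — treat the exact signatures as assumptions; `durationpb` is `google.golang.org/protobuf/types/known/durationpb`):

```go
// UpdateVotingPeriod reads the single Params row, mutates it, and writes it back.
func UpdateVotingPeriod(ctx context.Context, db StateStore, period *durationpb.Duration) error {
	params, err := db.ParamsTable().Get(ctx)
	if err != nil {
		return err
	}
	if params == nil {
		params = &Params{} // assumption: no row stored yet on first write
	}
	params.VotingPeriod = period
	return db.ParamsTable().Save(ctx, params)
}
```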
## Running Codegen
NOTE: the ORM will only work with protobuf code that implements the [google.golang.org/protobuf](https://pkg.go.dev/google.golang.org/protobuf)
API. That means it will not work with code generated using gogo-proto.
To install the ORM's code generator, run:
```shell
go install cosmossdk.io/orm/cmd/protoc-gen-go-cosmos-orm@latest
```
The recommended way to run the code generator is to use [buf build](https://docs.buf.build/build/usage).
This is an example `buf.gen.yaml` that runs `protoc-gen-go`, `protoc-gen-go-grpc` and `protoc-gen-go-cosmos-orm`
using buf managed mode:
```yaml
version: v1
managed:
enabled: true
go_package_prefix:
default: foo.bar/api # the go package prefix of your package
override:
buf.build/cosmos/cosmos-sdk: cosmossdk.io/api # required to import the Cosmos SDK api module
plugins:
- name: go
out: .
opt: paths=source_relative
- name: go-grpc
out: .
opt: paths=source_relative
- name: go-cosmos-orm
out: .
opt: paths=source_relative
```
## Using the ORM in a module
### Initialization
To use the ORM in a module, first create a `ModuleSchemaDescriptor`. This tells the ORM which .proto files have defined
an ORM schema and assigns them all a unique non-zero id. Ex:
```go
var MyModuleSchema = &ormv1alpha1.ModuleSchemaDescriptor{
SchemaFile: []*ormv1alpha1.ModuleSchemaDescriptor_FileEntry{
{
Id: 1,
ProtoFileName: mymodule.File_my_module_state_proto.Path(),
},
},
}
```
In the ORM generated code for a file named `state.proto`, there will be a generated interface `StateStore` with a
constructor `NewStateStore` that takes a parameter of type `ormdb.ModuleDB`. Add a reference to `StateStore`
to your module's keeper struct. Ex:
```go
type Keeper struct {
db StateStore
}
```
Then instantiate the `StateStore` via an `ormdb.ModuleDB`, which is itself instantiated from the `ModuleSchemaDescriptor`
above and one or more store services from `cosmossdk.io/core/store`. Ex:
```go
func NewKeeper(storeService store.KVStoreService) (*Keeper, error) {
modDb, err := ormdb.NewModuleDB(MyModuleSchema, ormdb.ModuleDBOptions{KVStoreService: storeService})
if err != nil {
return nil, err
}
db, err := NewStateStore(modDb)
if err != nil {
return nil, err
}
return &Keeper{db: db}, nil
}
```
### Using the generated code
The generated code for the ORM contains methods for inserting, updating, deleting and querying table entries.
For each table in a .proto file, there is a type-safe table interface implemented in generated code. For instance,
for a table named `Balance` there should be a `BalanceTable` interface that looks like this:
```go
type BalanceTable interface {
Insert(ctx context.Context, balance *Balance) error
Update(ctx context.Context, balance *Balance) error
Save(ctx context.Context, balance *Balance) error
Delete(ctx context.Context, balance *Balance) error
Has(ctx context.Context, account []byte, denom string) (found bool, err error)
// Get returns nil and an error which responds true to ormerrors.IsNotFound() if the record was not found.
Get(ctx context.Context, account []byte, denom string) (*Balance, error)
List(ctx context.Context, prefixKey BalanceIndexKey, opts ...ormlist.Option) (BalanceIterator, error)
ListRange(ctx context.Context, from, to BalanceIndexKey, opts ...ormlist.Option) (BalanceIterator, error)
DeleteBy(ctx context.Context, prefixKey BalanceIndexKey) error
DeleteRange(ctx context.Context, from, to BalanceIndexKey) error
doNotImplement()
}
```
This `BalanceTable` should be accessible from the `StateStore` interface (assuming our file is named `state.proto`)
via a `BalanceTable()` accessor method. If all the above example tables/singletons were in the same `state.proto`,
then `StateStore` would get generated like this:
```go
type StateStore interface {
BalanceTable() BalanceTable
AccountTable() AccountTable
ParamsTable() ParamsTable
doNotImplement()
}
```
So to work with the `BalanceTable` in a keeper method we could use code like this:
```go
func (k keeper) AddBalance(ctx context.Context, acct []byte, denom string, amount uint64) error {
balance, err := k.db.BalanceTable().Get(ctx, acct, denom)
if err != nil && !ormerrors.IsNotFound(err) {
return err
}
if balance == nil {
balance = &Balance{
Account: acct,
Denom: denom,
Amount: amount,
}
} else {
balance.Amount = balance.Amount + amount
}
return k.db.BalanceTable().Save(ctx, balance)
}
```
`List` methods take `IndexKey` parameters. For instance, `BalanceTable.List` takes `BalanceIndexKey`. `BalanceIndexKey`
lets us represent index keys for the different indexes (primary and secondary) on the `Balance` table. The primary key
in the `Balance` table gets a struct `BalanceAccountDenomIndexKey` and the first index gets an index key `BalanceDenomIndexKey`.
If we wanted to list all the denoms and amounts that an account holds, we would use `BalanceAccountDenomIndexKey`
with a `List` query just on the account prefix. Ex:
```go
it, err := keeper.db.BalanceTable().List(ctx, BalanceAccountDenomIndexKey{}.WithAccount(acct))
```
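Draining the returned iterator follows the standard generated-iterator pattern (`Next`/`Value`/`Close` — verify the exact method set against your generated code). A sketch that totals an account's balances, reusing the names from this section:

```go
// sumAccountBalances lists all denoms held by acct and sums their amounts.
func sumAccountBalances(ctx context.Context, db StateStore, acct []byte) (uint64, error) {
	it, err := db.BalanceTable().List(ctx, BalanceAccountDenomIndexKey{}.WithAccount(acct))
	if err != nil {
		return 0, err
	}
	defer it.Close()
	var total uint64
	for it.Next() {
		balance, err := it.Value() // each row is a fully decoded *Balance
		if err != nil {
			return 0, err
		}
		total += balance.Amount
	}
	return total, nil
}
```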
---
# ProtocolBuffer Annotations
This document explains the various protobuf scalars that have been added to make working with protobuf easier for Cosmos SDK application developers.
## Signer
Signer specifies which field should be used to determine the signer of a message for the Cosmos SDK. This field can be used for clients as well to infer which field should be used to determine the signer of a message.
Read more about the signer field [here](./02-messages-and-queries.md).
```protobuf reference
https://github.com/cosmos/cosmos-sdk/blob/e6848d99b55a65d014375b295bdd7f9641aac95e/proto/cosmos/bank/v1beta1/tx.proto#L40
```
```proto
option (cosmos.msg.v1.signer) = "from_address";
```
## Scalar
The scalar type defines a way for clients to understand how to construct protobuf messages according to what is expected by the module and SDK.
```proto
(cosmos_proto.scalar) = "cosmos.AddressString"
```
Example of account address string scalar:
```proto reference
https://github.com/cosmos/cosmos-sdk/blob/e6848d99b55a65d014375b295bdd7f9641aac95e/proto/cosmos/bank/v1beta1/tx.proto#L46
```
Example of validator address string scalar:
```proto reference
https://github.com/cosmos/cosmos-sdk/blob/e8f28bf5db18b8d6b7e0d94b542ce4cf48fed9d6/proto/cosmos/distribution/v1beta1/query.proto#L87
```
Example of pubkey scalar:
```proto reference
https://github.com/cosmos/cosmos-sdk/blob/11068bfbcd44a7db8af63b6a8aa079b1718f6040/proto/cosmos/staking/v1beta1/tx.proto#L94
```
Example of Decimals scalar:
```proto reference
https://github.com/cosmos/cosmos-sdk/blob/e8f28bf5db18b8d6b7e0d94b542ce4cf48fed9d6/proto/cosmos/distribution/v1beta1/distribution.proto#L26
```
Example of Int scalar:
```proto reference
https://github.com/cosmos/cosmos-sdk/blob/e8f28bf5db18b8d6b7e0d94b542ce4cf48fed9d6/proto/cosmos/gov/v1/gov.proto#L137
```
There are a few options for what can be provided as a scalar: `cosmos.AddressString`, `cosmos.ValidatorAddressString`, `cosmos.ConsensusAddressString`, `cosmos.Int`, `cosmos.Dec`.
## Implements_Interface
Implement interface is used to provide information to client tooling like [telescope](https://github.com/cosmology-tech/telescope) on how to encode and decode protobuf messages.
```proto
option (cosmos_proto.implements_interface) = "cosmos.auth.v1beta1.AccountI";
```
## Method,Field,Message Added In
`method_added_in`, `field_added_in` and `message_added_in` are annotations that denote to clients that a method, field, or message was added in a later version. This is useful when new methods or fields are added in later versions and the client needs to be aware of what it can call.
The annotation should be worded as follows:
```proto
option (cosmos_proto.method_added_in) = "cosmos-sdk v0.50.1";
option (cosmos_proto.method_added_in) = "x/epochs v1.0.0";
option (cosmos_proto.method_added_in) = "simapp v24.0.0";
```
## Amino
The amino codec was removed in `v0.50+`; this means there is no longer a need to register a `legacyAminoCodec`. To replace the amino codec, Amino protobuf annotations are used to provide information to the amino codec on how to encode and decode protobuf messages.
:::note
Amino annotations are only used for backwards compatibility with amino. New modules are not required to use amino annotations.
:::
The below annotations are used to provide information to the amino codec on how to encode and decode protobuf messages in a backwards compatible manner.
### Name
Name specifies the amino name that will show up for the user so that they can see which message they are signing.
```proto
option (amino.name) = "cosmos-sdk/BaseAccount";
```
```proto reference
https://github.com/cosmos/cosmos-sdk/blob/e8f28bf5db18b8d6b7e0d94b542ce4cf48fed9d6/proto/cosmos/bank/v1beta1/tx.proto#L41
```
### Field_Name
Field name specifies the amino name that will show up for the user so that they can see which field they are signing.
```proto
uint64 height = 1 [(amino.field_name) = "public_key"];
```
```proto reference
https://github.com/cosmos/cosmos-sdk/blob/e8f28bf5db18b8d6b7e0d94b542ce4cf48fed9d6/proto/cosmos/distribution/v1beta1/distribution.proto#L166
```
### Dont_OmitEmpty
Dont omitempty specifies that the field should not be omitted when encoding to amino.
```proto
repeated cosmos.base.v1beta1.Coin amount = 3 [(amino.dont_omitempty) = true];
```
```proto reference
https://github.com/cosmos/cosmos-sdk/blob/e8f28bf5db18b8d6b7e0d94b542ce4cf48fed9d6/proto/cosmos/bank/v1beta1/bank.proto#L56
```
### Encoding
Encoding instructs the amino json marshaler how to encode certain fields that may differ from the standard encoding behaviour. The most common example of this is how `repeated cosmos.base.v1beta1.Coin` is encoded when using the amino json encoding format. The `legacy_coins` option tells the json marshaler [how to encode a null slice](https://github.com/cosmos/cosmos-sdk/blob/e8f28bf5db18b8d6b7e0d94b542ce4cf48fed9d6/x/tx/signing/aminojson/json_marshal.go#L65) of `cosmos.base.v1beta1.Coin`.
```proto
(amino.encoding) = "legacy_coins",
```
```proto reference
https://github.com/cosmos/cosmos-sdk/blob/e8f28bf5db18b8d6b7e0d94b542ce4cf48fed9d6/proto/cosmos/bank/v1beta1/genesis.proto#L23
```
Another example is a protobuf `bytes` that contains a valid JSON document.
The `inline_json` option tells the json marshaler to embed the JSON bytes into the wrapping document without escaping.
```proto
(amino.encoding) = "inline_json",
```
E.g. the bytes containing `{"foo":123}` in the `envelope` field would lead to the following JSON:
```json
{
"envelope": {
"foo": 123
}
}
```
If the bytes are not valid JSON, this leads to broken JSON documents. Thus a JSON validity check needs to be in place at some point in the process.

View File

@ -0,0 +1,627 @@
# RFC 004: Account System Refactor
## Status
- Draft v2 (May 2023)
## Current Limitations
1. **Account Representation**: Limited by `google.Protobuf.Any` encapsulation and basic authentication methods
2. **Interface Constraints**: Lacks support for advanced functionalities like vesting and complex auth systems
3. **Implementation Rigidity**: Poor differentiation between account types (e.g., `ModuleAccount`)
4. **Authorization System**: Basic `x/auth` module with limited scope beyond `x/bank` functionality
5. **Dependency Issues**: Cyclic dependencies between modules (e.g., `x/auth` → `x/bank` for vesting)
## Proposal
This proposal aims to transform the way accounts are managed within the Cosmos SDK by introducing significant changes to
their structure and functionality.
### Rethinking Account Representation and Business Logic
Instead of representing accounts as simple `google.Protobuf.Any` structures stored in state with no business logic
attached, this proposal suggests a more sophisticated account representation that is closer to module entities.
In fact, accounts should be able to receive messages and process them in the same way modules do, and be capable of storing
state in an isolated (prefixed) portion of state belonging only to them, in the same way modules do.
### Account Message Reception
We propose that accounts should be able to receive messages in the same way modules can, allowing them to manage their
own state modifications without relying on other modules. This change would enable more advanced account functionality, such as the
`VestingAccount` example, where the `x/bank` module previously needed to change the vesting state by casting the abstracted
account to `VestingAccount` and triggering the `TrackDelegation` call. Accounts are already capable of sending messages when
a state transition, originating from a transaction, is executed.
When accounts receive messages, they will be able to identify the sender of the message and decide how to process the
state transition, if at all.
### Consequences
These changes would have significant implications for the Cosmos SDK, resulting in a system of actors that are equal from
the runtime perspective. The runtime would only be responsible for propagating messages between actors and would not
manage the authorization system. Instead, actors would manage their own authorizations. For instance, there would be no
need for the `x/auth` module to manage minting or burning of coins permissions, as it would fall within the scope of the
`x/bank` module.
The key difference between accounts and modules would lie in the origin of the message (state transition). Accounts
(ExternallyOwnedAccount), which have credentials (e.g., a public/private key pairing), originate state transitions from
transactions. In contrast, module state transitions do not have authentication credentials backing them and can be
caused by two factors: either as a consequence of a state transition coming from a transaction or triggered by a scheduler
(e.g., the runtime's Begin/EndBlock).
By implementing these proposed changes, the Cosmos SDK will benefit from a more extensible, versatile, and efficient account
management system that is better suited to address the requirements of the Cosmos ecosystem.
#### Standardization
With `x/accounts` allowing a modular API, there is a need for standardization of accounts and of the interfaces wallets and other clients should expect to use. For this reason we will be using the [`CIP` repo](https://github.com/cosmos/cips) to standardize interfaces so that wallets know what to expect when interacting with accounts.
## Implementation
### Account Definition
We define the new `Account` type, which is what an account needs to implement to be treated as such.
An `Account` type is defined at the APP level, so it cannot be dynamically loaded while the chain is running without upgrading the
node code, unless we create something like a `CosmWasmAccount`, which is an account backed by an `x/wasm` contract.
```go
// Account is what the developer implements to define an account.
type Account[InitMsg proto.Message] interface {
// Init is the function that initialises an account instance of a given kind.
// InitMsg is used to initialise the initial state of an account.
Init(ctx *Context, msg InitMsg) error
// RegisterExecuteHandlers registers an account's execution messages.
RegisterExecuteHandlers(executeRouter *ExecuteRouter)
// RegisterQueryHandlers registers an account's query messages.
RegisterQueryHandlers(queryRouter *QueryRouter)
// RegisterMigrationHandlers registers an account's migration messages.
RegisterMigrationHandlers(migrationRouter *MigrationRouter)
}
```
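To make the contract concrete, here is a hypothetical toy implementation (mine, not the RFC's). `MsgInitCounter` stands in for a generated `proto.Message`, `saveCounter` is an invented helper, and the handler-registration bodies are elided because the router APIs are only sketched at this stage:

```go
// CounterAccount is a toy account kind that counts how many times it is poked.
type CounterAccount struct{}

// Init seeds the account's isolated (prefixed) state with a starting value.
// saveCounter is a hypothetical helper writing to the account's KVStore via ctx.
func (CounterAccount) Init(ctx *Context, msg *MsgInitCounter) error {
	return saveCounter(ctx, msg.Start)
}

// Handler registration; bodies elided since the router APIs are only sketched here.
func (CounterAccount) RegisterExecuteHandlers(r *ExecuteRouter)     { /* e.g. MsgPoke */ }
func (CounterAccount) RegisterQueryHandlers(r *QueryRouter)         { /* e.g. QueryCounter */ }
func (CounterAccount) RegisterMigrationHandlers(r *MigrationRouter) {}
```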
### The InternalAccount definition
The public `Account` interface implementation is then converted by the runtime into an `InternalAccount` implementation,
which contains all the information and business logic needed to operate the account.
```go
type Schema struct {
state StateSchema // represents the state of an account
init InitSchema // represents the init msg schema
exec ExecSchema // represents the multiple execution msg schemas, containing also responses
query QuerySchema // represents the multiple query msg schemas, containing also responses
migrate *MigrateSchema // represents the multiple migrate msg schemas, containing also responses, it's optional
}
type InternalAccount struct {
init func(ctx *Context, msg proto.Message) (*InitResponse, error)
execute func(ctx *Context, msg proto.Message) (*ExecuteResponse, error)
query func(ctx *Context, msg proto.Message) (proto.Message, error)
schema func() *Schema
migrate func(ctx *Context, msg proto.Message) (*MigrateResponse, error)
}
```
This is an internal view of the account as intended by the system. It is not meant to be what developers implement. An
example implementation of the `InternalAccount` type can be found in [this](https://github.com/testinginprod/accounts-poc/blob/main/examples/recover/recover.go)
example of an account whose credentials can be recovered. In fact, even if the `Internal` implementation is untyped (with
respect to `proto.Message`), the concrete implementation is fully typed.
During any of the execution methods of `InternalAccount`, `schema` excluded, the account is given a `Context` which provides:
- A namespaced `KVStore` for the account, which isolates the account state from others (NOTE: no `store keys` needed,
the account address serves as `store key`).
- Information regarding itself (its address)
- Information regarding the sender.
- ...
#### Init
Init defines the entrypoint that allows for a new account instance of a given kind to be initialised.
The account is passed some opaque protobuf message which is then interpreted and contains the instructions that
constitute the initial state of an account once it is deployed.
An `Account` code can be deployed multiple times through the `Init` function, similar to how a `CosmWasm` contract code
can be deployed (Instantiated) multiple times.
#### Execute
Execute defines the entrypoint that allows an `Account` to process a state transition; the account can then decide how to
process the state transition based on the message provided and the sender of the transition.
#### Query
Query defines a read-only entrypoint that provides a stable interface that links an account with its state. The reason for
which `Query` is still being preferred as an addition to raw state reflection is to:
- Provide a stable interface for querying (state can be optimised and change more frequently than a query)
- Provide a way to define an account `Interface` with respect to its `Read/Write` paths.
- Provide a way to query information that cannot be processed from raw state reflection, ex: compute information from lazy
state that has not been yet concretely processed (eg: balances with respect to lazy inputs/outputs)
#### Schema
Schema provides the definition of an account from an `API` perspective, and it's the only thing that should be taken into account
when interacting with an account from another account or module. For example: an account is an `authz-interface` account if
it has the following message in its execution messages: `MsgProxyStateTransition{ state_transition: google.Protobuf.Any }`.
#### Migrate
Migrate defines the entrypoint that allows an `Account` to migrate its state from a previous version to a new one. Migrations
can be initiated only by the account itself; concretely, this means that the migrate action sender can only be the account address
itself. If the account wants to allow another address to migrate it on its behalf, it could create an execution message
that makes the account migrate itself.
### x/accounts module
In order to create accounts we define a new module, `x/accounts`. Note that `x/accounts` deploys accounts with no authentication
credentials attached, which means no action of an account can be incepted from a TX; we will later explore how the
`x/authn` module uses `x/accounts` to deploy authenticated accounts.
This also has another important implication: account addresses are now fully decoupled from the authentication mechanism,
which in turn makes off-chain operations a little more complex, as the chain becomes the real link between account identifier
and credentials.
We could also introduce a way to deterministically compute the account address.
Note, from the transaction point of view, the `init_message` and `execute_message` are opaque `google.Protobuf.Any`.
The module protobuf definitions for `x/accounts` are the following:
```protobuf
// Msg defines the Msg service.
service Msg {
rpc Deploy(MsgDeploy) returns (MsgDeployResponse);
rpc Execute(MsgExecute) returns (MsgExecuteResponse);
rpc Migrate(MsgMigrate) returns (MsgMigrateResponse);
}
message MsgDeploy {
string sender = 1;
string kind = 2;
google.Protobuf.Any init_message = 3;
repeated google.Protobuf.Any authorize_messages = 4 [(gogoproto.nullable) = false];
}
message MsgDeployResponse {
string address = 1;
uint64 id = 2;
google.Protobuf.Any data = 3;
}
message MsgExecute {
string sender = 1;
string address = 2;
google.Protobuf.Any message = 3;
repeated google.Protobuf.Any authorize_messages = 4 [(gogoproto.nullable) = false];
}
message MsgExecuteResponse {
google.Protobuf.Any data = 1;
}
message MsgMigrate {
string sender = 1;
string new_account_kind = 2;
google.Protobuf.Any migrate_message = 3;
}
message MsgMigrateResponse {
google.Protobuf.Any data = 1;
}
```
#### MsgDeploy
Deploys a new instance of the given account `kind` with initial settings represented by the `init_message`, which is a `google.Protobuf.Any`.
Of course, the `init_message` can be empty. A response is returned containing the account ID and humanised address, alongside any response
that the account instantiation might produce.
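For illustration (my sketch, not the RFC's), constructing a `MsgDeploy` with the Go types standard protoc codegen would emit for the service above could look like this; in practice the message would be delivered inside a transaction rather than via a bare gRPC client, and all names beyond the proto definition are assumptions:

```go
// deployAccount builds and submits a MsgDeploy for a hypothetical "counter" kind.
func deployAccount(ctx context.Context, c MsgClient, sender string, init *anypb.Any) (string, error) {
	resp, err := c.Deploy(ctx, &MsgDeploy{
		Sender:      sender,
		Kind:        "counter",
		InitMessage: init, // may be empty, per the text above
	})
	if err != nil {
		return "", err
	}
	return resp.Address, nil // the humanised address from the response
}
```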
#### Address derivation
In order to decouple public keys from account addresses, we introduce a new address derivation mechanism which is
#### MsgExecute
Sends a `StateTransition` execution request, where the state transition is represented by the `message`, which is a `google.Protobuf.Any`.
The account can then decide whether or not to process it based on the `sender`.
#### MsgMigrate
Migrates an account to a new version of itself; the new version is represented by the `new_account_kind`. The state transition
can only be incepted by the account itself, which means that the `sender` must be the account address itself. During the migration,
the account's current state is given to the new version of the account, which then executes the migration logic using the `migrate_message`;
it might change state or not, it's up to the account to decide. The response contains possible data that the account might produce
after the migration.
#### Authorize Messages
The `Deploy` and `Execute` messages have a field in common called `authorize_messages`; these are messages that the account
can execute on behalf of the sender. For example, in case an account is expecting some funds to be sent from the sender,
the sender can attach a `MsgSend` that the account can execute on the sender's behalf. These authorizations are short-lived,
they live only for the duration of the `Deploy` or `Execute` message execution, or until they are consumed.
An alternative would have been to add a `funds` field, as happens in CosmWasm, which guarantees the called contract that
the funds are available and sent in the context of the message execution. This would have been a simpler approach, but it would
have been limited to the context of `MsgSend` only, where the asset is `sdk.Coins`. The proposed generic way, instead, allows
the account to execute any message on behalf of the sender, which is more flexible; it could include NFT send execution, or
more complex things like `MsgMultiSend` or `MsgDelegate`, etc.
### Further discussion
#### Sub-accounts
We could provide a way to link accounts to other accounts. Maybe during deployment the sender could decide to link the
newly created account to its own account. There might be use cases in which the deployer is different from the account
that needs to be linked; in this case, a handshake protocol on linking would need to be defined.
#### Predictable address creation
We need to provide a way to create an account with a predictable address, this might serve a lot of purposes, like accounts
wanting to generate an address that:
- nobody else can claim besides the account used to generate the new account
- is predictable
For example:
```protobuf
message MsgDeployPredictable {
string sender = 1;
uint32 nonce = 2;
...
}
```
And then the address becomes `bechify(concat(sender, nonce))`
`x/accounts` would still use the monotonically increasing sequence as account number.
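As an illustrative sketch (not part of the RFC), `bechify(concat(sender, nonce))` could hash the concatenation before bech32-encoding so addresses have a fixed length; the `cosmos` prefix, the sha256 step, and the 20-byte truncation are all assumptions here:

```go
import (
	"crypto/sha256"
	"encoding/binary"

	"github.com/cosmos/cosmos-sdk/types/bech32"
)

// predictableAddress deterministically derives an address from deployer + nonce.
func predictableAddress(sender []byte, nonce uint32) (string, error) {
	buf := make([]byte, len(sender)+4)
	copy(buf, sender)
	binary.BigEndian.PutUint32(buf[len(sender):], nonce)
	sum := sha256.Sum256(buf)
	return bech32.ConvertAndEncode("cosmos", sum[:20])
}
```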
#### Joining Multiple Accounts
As developers are building new kinds of accounts, it becomes necessary to provide a default way to combine the
functionalities of different account types. This allows developers to avoid duplicating code and enables end-users to
create or migrate to accounts with multiple functionalities without requiring custom development.
To address this need, we propose the inclusion of a default account type called "MultiAccount". The MultiAccount type is
designed to merge the functionalities of other accounts by combining their execution, query, and migration APIs.
The account joining process would only fail in the case of API (intended as non-state Schema APIs) conflicts, ensuring
compatibility and consistency.
With the introduction of the MultiAccount type, users would have the option to either migrate their existing accounts to
a MultiAccount type or extend an existing MultiAccount with newer APIs. This flexibility empowers users to leverage
various account functionalities without compromising compatibility or resorting to manual code duplication.
The MultiAccount type serves as a standardized solution for combining different account functionalities within the
cosmos-sdk ecosystem. By adopting this approach, developers can streamline the development process and users can benefit
from a modular and extensible account system.
# ADR 071: Cryptography v2 - Multi-curve support
## Change log
- May 7th 2024: Initial Draft (Zondax AG: @raynaudoe @juliantoledano @jleni @educlerici-zondax @lucaslopezf)
- June 13th 2024: Add CometBFT implementation proposal (Zondax AG: @raynaudoe @juliantoledano @jleni @educlerici-zondax @lucaslopezf)
- July 2nd 2024: Split ADR proposal, add link to ADR in cosmos/crypto (Zondax AG: @raynaudoe @juliantoledano @jleni @educlerici-zondax @lucaslopezf)
## Status
DRAFT
## Abstract
This ADR proposes the refactoring of the existing `Keyring` and `cosmos-sdk/crypto` code to implement [ADR-001-CryptoProviders](https://github.com/cosmos/crypto/blob/main/docs/architecture/adr-001-crypto-provider.md).
For in-depth details of the `CryptoProviders` and their design please refer to ADR mentioned above.
## Introduction
The introduction of multi-curve support in the cosmos-sdk cryptographic package offers significant advantages. By not being restricted to a single cryptographic curve, developers can choose the most appropriate curve based on security, performance, and compatibility requirements. This flexibility enhances the application's ability to adapt to evolving security standards and optimizes performance for specific use cases, helping to future-proof the SDK's cryptographic capabilities.
The enhancements in this proposal not only render the ["Keyring ADR"](https://github.com/cosmos/cosmos-sdk/issues/14940) obsolete, but also encompass its key aspects, replacing it with a more flexible and comprehensive approach. Furthermore, the gRPC service proposed in the mentioned ADR can be easily implemented as a specialized `CryptoProvider`.
### Glossary
1. **Interface**: In the context of this document, "interface" refers to Go's interface.
2. **Module**: In this document, "module" refers to a Go module.
3. **Package**: In the context of Go, a "package" refers to a unit of code organization.
## Context
In order to fully understand the need for changes and the proposed improvements, it's crucial to consider the current state of affairs:
- The Cosmos SDK currently lacks a comprehensive ADR for the cryptographic package.
- If a blockchain project requires a cryptographic curve that is not supported by the current SDK, the most likely scenario is that they will need to fork the SDK repository and make modifications. These modifications could potentially make the fork incompatible with future updates from the upstream SDK, complicating maintenance and integration.
- Type leakage of specific crypto data types exposes backward compatibility and extensibility challenges.
- The demand for a more flexible and extensible approach to cryptography and address management is high.
- Architectural changes are necessary to resolve many of the currently open issues related to new curves support.
- There is a current trend towards modularity in the Interchain stack (e.g., runtime modules).
- Security implications are a critical consideration during the redesign work.
## Objectives
The key objectives for this proposal are:
- **Leverage `CryptoProviders`**: Utilize them as APIs for cryptographic tools, ensuring modularity, flexibility, and ease of integration.
- **Developer-Centric Approach**: Prioritize clear, intuitive interfaces and best-practice design principles.
- **Quality Assurance**: Enhance test coverage to ensure the robustness and reliability of the module.
## Technical Goals
New Keyring:
- Design a new `Keyring` interface with modular backends injection system to support hardware devices and cloud-based HSMs. This feature is optional and tied to complexity; if it proves too complex, it will be deferred to a future release as an enhancement.
## Proposed architecture
### Components
The main components to be used will be the same as those found in the [ADR-001](https://github.com/cosmos/crypto/blob/main/docs/architecture/adr-001-crypto-provider.md#components).
#### Storage and persistence
The storage and persistence layer is tasked with storing `CryptoProvider`s. Specifically, this layer must:
- Securely store the crypto provider's associated private key (only if stored locally, otherwise a reference to the private key will be stored instead).
- Store the [`ProviderMetadata`](https://github.com/cosmos/crypto/blob/main/docs/architecture/adr-001-crypto-provider.md#metadata) struct which contains the data that distinguishes that provider.
The purpose of this layer is to ensure that upon retrieval of the persisted data, we can access the provider's type, version, and specific configuration (which varies based on the provider type). This information will subsequently be utilized to initialize the appropriate factory, as detailed in the following section on the factory pattern.
The storage proposal involves using a modified version of the [Record](https://github.com/cosmos/cosmos-sdk/blob/main/proto/cosmos/crypto/keyring/v1/record.proto) struct, which is already defined in **Keyring/v1**. Additionally, we propose utilizing the existing keyring backends (keychain, filesystem, memory, etc.) to store these `Record`s in the same manner as the current **Keyring/v1**.
_Note: This approach will facilitate a smoother migration path from the current Keyring/v1 to the proposed architecture._
Below is the proposed protobuf message to be included in the modified `Record.proto` file.
##### Protobuf message structure
The [record.proto](https://github.com/cosmos/cosmos-sdk/blob/main/proto/cosmos/crypto/keyring/v1/record.proto) file will be modified to include the `CryptoProvider` message as an optional field as follows.
```protobuf
// record.proto
message Record {
string name = 1;
google.protobuf.Any pub_key = 2;
oneof item {
Local local = 3;
Ledger ledger = 4;
Multi multi = 5;
Offline offline = 6;
CryptoProvider crypto_provider = 7; // <- New
}
message Local {
google.protobuf.Any priv_key = 1;
}
message Ledger {
hd.v1.BIP44Params path = 1;
}
message Multi {}
message Offline {}
}
```
##### Creating and loading a `CryptoProvider`
For creating providers, we propose a _factory pattern_ and a _registry_ for these builders. Examples of these
patterns can be found [here](https://github.com/cosmos/crypto/blob/main/docs/architecture/adr-001-crypto-provider.md#illustrative-code-snippets)
##### Keyring
The new `Keyring` interface will serve as a central hub for managing and fetching `CryptoProviders`. To ensure a smoother migration path, the new Keyring will be backward compatible with the previous version. Since this will be the main API from which applications will obtain their `CryptoProvider` instances, the proposal is to extend the Keyring interface to include the methods:
```go
type KeyringV2 interface {
// methods from Keyring/v1
// ListCryptoProviders returns a list of all the stored CryptoProvider metadata.
ListCryptoProviders() ([]ProviderMetadata, error)
// GetCryptoProvider retrieves a specific CryptoProvider by its id.
GetCryptoProvider(id string) (CryptoProvider, error)
}
```
_Note_: Methods to obtain a provider from a public key or other means that make it easier to load the desired provider can be added.
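As a usage sketch against the interface above (the `GetSigner`/`Sign` calls are assumptions borrowed from ADR-001's `CryptoProvider` design, not settled API):

```go
// signWith fetches a stored provider by id and signs msg with it.
func signWith(kr KeyringV2, providerID string, msg []byte) ([]byte, error) {
	provider, err := kr.GetCryptoProvider(providerID)
	if err != nil {
		return nil, err
	}
	signer, err := provider.GetSigner() // assumed ADR-001 accessor
	if err != nil {
		return nil, err
	}
	return signer.Sign(msg)
}
```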
##### Special use case: remote signers
It's important to note that the `CryptoProvider` interface is versatile enough to be implemented as a remote signer. This capability allows for the integration of remote cryptographic operations, which can be particularly useful in distributed or cloud-based environments where local cryptographic resources are limited or need to be managed centrally.
## Alternatives
It is important to note that all the code presented in this document is not in its final form and could be subject to changes at the time of implementation. The examples and implementations discussed should be interpreted as alternatives, providing a conceptual framework rather than definitive solutions. This flexibility allows for adjustments based on further insights, technical evaluations, or changing requirements as development progresses.
## Decision
We will:
- Leverage crypto providers
- Refactor the module structure as described above.
- Define types and interfaces as the code attached.
- Refactor existing code into new structure and interfaces.
- Implement Unit Tests to ensure no backward compatibility issues.
## Consequences
### Impact on the SDK codebase
We can divide the impact of this ADR into two main categories: state machine code and client related code.
#### Client
The major impact will be on the client side, where the current `Keyring` interface will be replaced by the new `KeyringV2` interface. At first, the impact will be low since `CryptoProvider` is an optional field in the `Record` message, so there's no mandatory requirement for migrating to this new concept right away. This allows a progressive transition where the risks of breaking changes or regressions are minimized.
#### State Machine
The impact on the state machine code will be minimal; the modules affected (at the time of writing this ADR) are the `x/accounts` module, specifically its `Authenticate` function, and the `x/auth/ante` module. These will need to be adapted to use a `CryptoProvider` service to make use of the `Verifier` instance.
It is worth mentioning that there is also the alternative of using `Verifier` instances in a standalone fashion (see note below).
The specific way to adapt these modules will be analyzed in depth and decided during the implementation of this ADR.
_Note_: All cryptographic tools (hashers, verifiers, signers, etc.) will continue to be available as standalone packages that can be imported and utilized directly without the need for a `CryptoProvider` instance. However, the `CryptoProvider` is the recommended method for using these tools as it offers a more secure way to handle sensitive data, enhanced modularity, and the ability to store configurations and metadata within the `CryptoProvider` definition.
### Backwards Compatibility
The proposed migration path is similar to what the cosmos-sdk has done in the past. To ensure a smooth transition, the following steps will be taken:
Once ADR-001 is implemented with a stable release:
- Deprecate the old crypto package. The old crypto package will still be usable, but it will be marked as deprecated and users can opt to use the new package.
- Migrate the codebase to use the new cosmos/crypto package and remove the old crypto one.
### Positive
- Single place of truth
- Easier to use interfaces
- Easier to extend
- Unit test for each crypto package
- Greater maintainability
- Incentivize addition of implementations instead of forks
- Decoupling behavior from implementation
- Sanitization of code
### Negative
- It will involve an effort to adapt existing code.
- It will require attention to detail and auditing.
### Neutral
- It will involve extensive testing.
## Test Cases
- The code will be unit tested to ensure high code coverage.
- There should be integration tests around Keyring and CryptoProviders.
# ADR-71 Bank V2
## Status
DRAFT
## Changelog
- 2024-05-08: Initial Draft (@samricotta, @julienrbrt)
## Abstract
The primary objective of refactoring the bank module is to simplify and enhance the functionality of the Cosmos SDK. Over time the bank module has been burdened with numerous responsibilities including transaction handling, account restrictions, delegation counting, and the minting and burning of coins.
In addition to the above, the bank module is currently too rigid and handles too many tasks, so this proposal aims to streamline the module by focusing on core functions `Send`, `Mint`, and `Burn`.
Currently, the module is split across different keepers with scattered and duplicated functionalities (with four send functions, for instance).
Additionally, the integration of the token factory into the bank module allows for standardization and better integration within the core modules.
This rewrite will reduce complexity and enhance the efficiency and UX of the bank module.
## Context
The current implementation of the bank module is characterised by its handling of a broad array of functions, leading to significant complexity in using and extending the bank module.
These issues have underscored the need for a refactoring strategy that simplifies the module's architecture and focuses on its most essential operations.
Additionally, there is an overlap in functionality with a Token Factory module, which could be integrated to streamline operations.
## Decision
**Permission Tightening**: Access to the module can be restricted to selected denominations only, ensuring that it operates within designated boundaries and does not exceed its intended scope. Currently, the permissions allow all denoms, so this should be changed. Send restrictions functionality will be maintained.
**Simplification of Logic**: The bank module will focus on core functionalities `Send`, `Mint`, and `Burn`. This refinement aims to streamline the architecture, enhancing both maintainability and performance.
**Integration of Token Factory**: The Token Factory will be merged into the bank module. This consolidation of related functionalities aims to reduce redundancy and enhance coherence within the system. Migration functions will be provided for migrating from Osmosis' Token Factory module to bank/v2.
**Legacy Support**: A legacy wrapper will be implemented to ensure compatibility with about 90% of existing functions. This measure will facilitate a smooth transition while keeping older systems functional.
**Denom Implementation**: An asset interface will be added to standardise interactions such as transfers, balance inquiries, minting, and burning across different tokens. This will allow the bank module to support arbitrary asset types, enabling developers to implement custom, ERC20-like denominations.
For example, currently if a team would like to extend the transfer method, the changes would apply universally, affecting all denoms. The proposed asset interface allows teams to customise or extend the transfer method specifically for their own tokens without impacting others.
These improvements are expected to enhance the flexibility of the bank module, allowing for the creation of custom tokens similar to ERC20 standards and assets backed by CosmWasm (CW) contracts. The integration efforts will also aim to unify CW20 with bank coins across the Cosmos chains.
Example of denom interface:
```go
type AssetInterface interface {
    Transfer(ctx sdk.Context, from sdk.AccAddress, to sdk.AccAddress, amount sdk.Coin) error
    Mint(ctx sdk.Context, to sdk.AccAddress, amount sdk.Coin) error
    Burn(ctx sdk.Context, from sdk.AccAddress, amount sdk.Coin) error
    QueryBalance(ctx sdk.Context, account sdk.AccAddress) (sdk.Coin, error)
}
```
Overview of flow:
1. Alice initiates a transfer by entering Bob's address and the amount (100 ATOM)
2. The Bank module verifies that the ATOM token implements the `AssetInterface` by querying the `ATOM_Denom_Account`, which is an `x/account` denom account.
3. The Bank module executes the transfer by calling the Transfer method on the `ATOM_Denom_Account`. The Transfer method, defined in the `AssetInterface`, handles the logic to subtract 100 ATOM from Alice's balance and add 100 ATOM to Bob's balance.
4. The Bank module updates the chain and returns the new balances.
5. Both Alice and Bob successfully receive the updated balances.
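To illustrate how a custom denom could plug into this flow, here is a hypothetical, non-normative implementation of the interface's `Transfer` method that enforces a per-transfer cap. `BankKeeper` and every other name below are placeholders, and the remaining interface methods are omitted for brevity.
```go
// Hypothetical custom asset that caps individual transfers.
type CappedAsset struct {
    bank      BankKeeper // assumed minimal interface over the bank module
    maxAmount sdk.Coin
}

func (a CappedAsset) Transfer(ctx sdk.Context, from sdk.AccAddress, to sdk.AccAddress, amount sdk.Coin) error {
    if amount.Amount.GT(a.maxAmount.Amount) {
        return fmt.Errorf("transfer of %s exceeds cap %s", amount, a.maxAmount)
    }
    // Delegate the actual balance updates to the bank module.
    return a.bank.Send(ctx, from, to, amount)
}
```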
## Migration Plans
Bank is a widely used module, so a v2 needs to be thought through thoroughly. In order to not force all dependencies to immediately migrate to bank/v2, the same _upgrading_ path will be taken as for the `gov` module.
This means `cosmossdk.io/bank` will stay one module and there won't be a new `cosmossdk.io/bank/v2` go module. Instead, the bank protos will be versioned from `v1beta1` (current bank) to `v2`.
Bank `v1beta1` endpoints will use the new bank v2 implementation for maximum backward compatibility.
The bank `v1beta1` keepers will be deprecated and potentially eventually removed, but their proto and message definitions will remain.
Additionally, as bank plans to integrate the token factory, migration functions will be provided to migrate from the Osmosis token factory implementation (the most widely used one) to the new bank/v2 token factory.
## Consequences
### Positive
- Simplified interaction with bank APIs
- Backward compatible changes (no contracts or APIs broken)
- Optional migration (note: bank `v1beta1` won't get any new feature after bank `v2` release)
### Neutral
- Asset implementation not available cross-chain (an IBC-ed custom asset should possibly fall back to the default implementation)
- Many assets may slow down bank balances requests
### Negative
- Temporarily duplicated functionality while bank `v1beta1` and `v2` live alongside each other
- Difficulty of ever completely removing bank `v1beta1`
## References
- Current bank module implementation: https://github.com/cosmos/cosmos-sdk/blob/v0.50.6/x/bank/keeper/keeper.go#L22-L53
- Osmosis token factory: https://github.com/osmosis-labs/osmosis/tree/v25.0.0/x/tokenfactory/keeper
# Cosmos SDK Core Components
## Overview
The Cosmos SDK is a framework for building secure blockchain applications on CometBFT. It provides:
- ABCI implementation in Go
- Multi-store persistence layer
- Transaction routing system
## Transaction Flow
1. CometBFT consensus delivers transaction bytes
2. SDK decodes transactions and extracts messages
3. Messages routed to appropriate modules
4. State changes committed to stores
```mermaid
graph TD
A[CometBFT] -->|Tx Bytes| B[SDK Decode]
B -->|Messages| C[Module Router]
C -->|State Changes| D[Multi-store]
```
## `baseapp`
`baseapp` is the boilerplate implementation of a Cosmos SDK application. It comes with an implementation of the ABCI to handle the connection with the underlying consensus engine. Typically, a Cosmos SDK application extends `baseapp` by embedding it in [`app.go`](../beginner/00-app-anatomy.md#core-application-file).
Here is an example of this from `simapp`, the Cosmos SDK demonstration app:
```go reference
https://github.com/cosmos/cosmos-sdk/blob/v0.52.0-beta.1/simapp/app.go#L145-L186
```
The goal of `baseapp` is to provide a secure interface between the store and the extensible state machine while defining as little about the state machine as possible (staying true to the ABCI).
For more on `baseapp`, please click [here](../advanced/00-baseapp.md).
## Multistore
The Cosmos SDK provides a [`multistore`](../advanced/04-store.md#multistore) for persisting state. The multistore allows developers to declare any number of [`KVStores`](../advanced/04-store.md#base-layer-kvstores). These `KVStores` only accept the `[]byte` type as value and therefore any custom structure needs to be marshalled using [a codec](../advanced/05-encoding.md) before being stored.
The multistore abstraction is used to divide the state in distinct compartments, each managed by its own module. For more on the multistore, click [here](../advanced/04-store.md#multistore).
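As a minimal sketch of what this means in practice (assuming `cdc` is a `codec.BinaryCodec`, `store` is a `KVStore` obtained from the multistore, and `Balance` is a protobuf-generated type):
```go
// setBalance marshals a custom structure before writing it to the KVStore,
// since KVStores only accept []byte values.
func setBalance(store storetypes.KVStore, cdc codec.BinaryCodec, key []byte, balance *Balance) error {
    bz, err := cdc.Marshal(balance)
    if err != nil {
        return err
    }
    store.Set(key, bz)
    return nil
}
```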
## Modules
The power of the Cosmos SDK lies in its modularity. Cosmos SDK applications are built by aggregating a collection of interoperable modules. Each module defines a subset of the state and contains its own message/transaction processor, while the Cosmos SDK is responsible for routing each message to its respective module.
Here is a simplified view of how a transaction is processed by the application of each full-node when it is received in a valid block:
```mermaid
flowchart TD
A[Transaction relayed from the full-node's CometBFT engine to the node's application via DeliverTx] --> B[APPLICATION]
B -->|"Using baseapp's methods: Decode the Tx, extract and route the message(s)"| C[Message routed to the correct module to be processed]
C --> D1[AUTH MODULE]
C --> D2[BANK MODULE]
C --> D3[STAKING MODULE]
C --> D4[GOV MODULE]
D1 -->|Handle message, Update state| E["Return result to CometBFT (0=Ok, 1=Err)"]
D2 -->|Handle message, Update state| E["Return result to CometBFT (0=Ok, 1=Err)"]
D3 -->|Handle message, Update state| E["Return result to CometBFT (0=Ok, 1=Err)"]
D4 -->|Handle message, Update state| E["Return result to CometBFT (0=Ok, 1=Err)"]
```
Each module can be seen as a little state-machine. Developers need to define the subset of the state handled by the module, as well as custom message types that modify the state (_Note:_ `messages` are extracted from `transactions` by `baseapp`). In general, each module declares its own `KVStore` in the `multistore` to persist the subset of the state it defines. Most developers will need to access other 3rd party modules when building their own modules. Given that the Cosmos SDK is an open framework, some of the modules may be malicious, which means there is a need for security principles to reason about inter-module interactions. These principles are based on [object-capabilities](../advanced/10-ocap.md). In practice, this means that instead of having each module keep an access control list for other modules, each module implements special objects called `keepers` that can be passed to other modules to grant a pre-defined set of capabilities.
Cosmos SDK modules are defined in the `x/` folder of the Cosmos SDK. Some core modules include:
- `x/auth`: Used to manage accounts and signatures.
- `x/bank`: Used to enable tokens and token transfers.
- `x/staking` + `x/slashing`: Used to build Proof-of-Stake blockchains.
In addition to the already existing modules in `x/`, which anyone can use in their app, the Cosmos SDK lets you build your own custom modules. You can check an [example of that in the tutorial](https://tutorials.cosmos.network/).

# Keepers
:::note Synopsis
`Keeper`s refer to a Cosmos SDK abstraction whose role is to manage access to the subset of the state defined by various modules. `Keeper`s are module-specific, i.e. the subset of state defined by a module can only be accessed by a `keeper` defined in said module. If a module needs to access the subset of state defined by another module, a reference to the second module's internal `keeper` needs to be passed to the first one. This is done in `app.go` during the instantiation of module keepers.
:::
:::note Pre-requisite Readings
- [Introduction to Cosmos SDK Modules](./00-intro.md)
:::
## Motivation
The Cosmos SDK is a framework that makes it easy for developers to build complex decentralized applications from scratch, mainly by composing modules together. As the ecosystem of open-source modules for the Cosmos SDK expands, it will become increasingly likely that some of these modules contain vulnerabilities, as a result of the negligence or malice of their developer.
The Cosmos SDK adopts an [object-capabilities-based approach](https://docs.cosmos.network/main/learn/advanced/ocap#ocaps-in-practice) to help developers better protect their application from unwanted inter-module interactions, and `keeper`s are at the core of this approach. A `keeper` can be considered quite literally to be the gatekeeper of a module's store(s). Each store (typically an [`IAVL` Store](../../learn/advanced/04-store.md#iavl-store)) defined within a module comes with a `storeKey`, which grants unlimited access to it. The module's `keeper` holds this `storeKey` (which should otherwise remain unexposed), and defines [methods](#implementing-methods) for reading and writing to the store(s).
The core idea behind the object-capabilities approach is to only reveal what is necessary to get the work done. In practice, this means that instead of handling permissions of modules through access-control lists, module `keeper`s are passed a reference to the specific instance of the other modules' `keeper`s that they need to access (this is done in the [application's constructor function](../../learn/beginner/00-app-anatomy.md#constructor-function)). As a consequence, a module can only interact with the subset of state defined in another module via the methods exposed by the instance of the other module's `keeper`. This is a great way for developers to control the interactions that their own module can have with modules developed by external developers.
## Type Definition
`keeper`s are generally implemented in a `/keeper/keeper.go` file located in the module's folder. By convention, the type `keeper` of a module is simply named `Keeper` and usually follows the following structure:
```go
type Keeper struct {
    // External keepers, if any
    // Store key(s)
    // codec
    // authority
}
```
For example, here is the type definition of the `keeper` from the `staking` module:
```go reference
https://github.com/cosmos/cosmos-sdk/blob/v0.52.0-beta.1/x/staking/keeper/keeper.go#L54-L115
```
Let us go through the different parameters:
- An expected `keeper` is a `keeper` external to a module that is required by the internal `keeper` of said module. External `keeper`s are listed in the internal `keeper`'s type definition as interfaces. These interfaces are themselves defined in an `expected_keepers.go` file in the root of the module's folder. In this context, interfaces are used to reduce the number of dependencies, as well as to facilitate the maintenance of the module itself.
- `KVStoreService`s grant access to the store(s) of the [multistore](../../learn/advanced/04-store.md) managed by the module. They should always remain unexposed to external modules.
- `cdc` is the [codec](../../learn/advanced/05-encoding.md) used to marshal and unmarshal structs to/from `[]byte`. The `cdc` can be any of `codec.BinaryCodec`, `codec.JSONCodec` or `codec.Codec` based on your requirements. It can be either a proto or amino codec as long as they implement these interfaces.
- The authority listed is a module account or user account that has the right to change module level parameters. Previously this was handled by the param module, which has been deprecated.
Of course, it is possible to define different types of internal `keeper`s for the same module (e.g. a read-only `keeper`). Each type of `keeper` comes with its own constructor function, which is called from the [application's constructor function](../../learn/beginner/00-app-anatomy.md). This is where `keeper`s are instantiated, and where developers make sure to pass correct instances of modules' `keeper`s to other modules that require them.
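Putting these parameters together, a minimal keeper and its constructor could look like the following sketch; the expected keeper interface and the exact field set are illustrative, not a required layout.
```go
type Keeper struct {
    // Expected keeper, defined as an interface in expected_keepers.go.
    accountKeeper types.AccountKeeper

    storeService store.KVStoreService // access to the module's store; never exposed
    cdc          codec.BinaryCodec    // codec used to (un)marshal state
    authority    string               // account allowed to update module parameters
}

func NewKeeper(cdc codec.BinaryCodec, storeService store.KVStoreService, ak types.AccountKeeper, authority string) Keeper {
    return Keeper{
        accountKeeper: ak,
        storeService:  storeService,
        cdc:           cdc,
        authority:     authority,
    }
}
```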
## Implementing Methods
`Keeper`s primarily expose methods for business logic, as validity checks should have already been performed by the [`Msg` server](./03-msg-services.md) when `keeper`s' methods are called.
<!-- markdown-link-check-disable -->
State management is recommended to be done via [Collections](../packages/collections)
<!-- The above link is created via the script to generate docs -->
## State Management
In the Cosmos SDK, it is crucial to be methodical and selective when managing state within a module, as improper state management can lead to inefficiency, security risks, and scalability issues. Not all data belongs in the on-chain state; it's important to store only essential blockchain data that needs to be verified by consensus. Storing unnecessary information, especially client-side data, can bloat the state and slow down performance. Instead, developers should focus on using an off-chain database to handle supplementary data, extending the API as needed. This approach minimizes on-chain complexity, optimizes resource usage, and keeps the blockchain state lean and efficient, ensuring scalability and smooth operations.
The Cosmos SDK leverages Protocol Buffers (protobuf) for efficient state management, providing a well-structured, binary encoding format that ensures compatibility and performance across different modules. The SDK's recommended approach for managing state is through the [collections package](../packages/02-collections.md), which simplifies state handling by offering predefined data structures like maps and indexed sets, reducing the complexity of managing raw state data. While users can opt for custom encoding schemes if they need more flexibility or have specialized requirements, they should be aware that such custom implementations may not integrate seamlessly with indexers that decode state data on the fly. This could lead to challenges in data retrieval, querying, and interoperability, making protobuf a safer and more future-proof choice for most use cases.
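As a brief sketch of the collections-based approach (assuming a `store.KVStoreService` from `cosmossdk.io/core/store` and the `cosmossdk.io/collections` package):
```go
type Keeper struct {
    Schema   collections.Schema
    Balances collections.Map[string, uint64] // address -> balance
}

func NewKeeper(storeService store.KVStoreService) Keeper {
    sb := collections.NewSchemaBuilder(storeService)
    k := Keeper{
        Balances: collections.NewMap(sb, collections.NewPrefix(0), "balances", collections.StringKey, collections.Uint64Value),
    }
    schema, err := sb.Build()
    if err != nil {
        panic(err)
    }
    k.Schema = schema
    return k
}
```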
# Folder Structure
:::note Synopsis
This document outlines the structure of Cosmos SDK modules. These ideas are meant to be applied as suggestions. Application developers are encouraged to improve upon and contribute to module structure and development design.
The required interface for a module is located in the module.go. Everything beyond this is suggestive.
:::
## Structure
A typical Cosmos SDK module can be structured as follows:
```shell
proto
└── {project_name}
    └── {module_name}
        └── {proto_version}
            ├── {module_name}.proto
            ├── genesis.proto
            ├── query.proto
            └── tx.proto
```
- `{module_name}.proto`: The module's common message type definitions.
- `genesis.proto`: The module's message type definitions related to genesis state.
- `query.proto`: The module's Query service and related message type definitions.
- `tx.proto`: The module's Msg service and related message type definitions.
```shell
x/{module_name}
├── client
│   ├── cli
│   │   ├── query.go
│   │   └── tx.go
│   └── testutil
│       ├── cli_test.go
│       └── suite.go
├── exported
│   └── exported.go
├── keeper
│   ├── genesis.go
│   ├── grpc_query.go
│   ├── hooks.go
│   ├── invariants.go
│   ├── keeper.go
│   ├── keys.go
│   ├── msg_server.go
│   └── querier.go
├── simulation
│   ├── decoder.go
│   ├── genesis.go
│   ├── operations.go
│   └── params.go
├── types
│   ├── {module_name}.pb.go
│   ├── codec.go
│   ├── errors.go
│   ├── events.go
│   ├── events.pb.go
│   ├── expected_keepers.go
│   ├── genesis.go
│   ├── genesis.pb.go
│   ├── keys.go
│   ├── msgs.go
│   ├── params.go
│   ├── query.pb.go
│   └── tx.pb.go
├── module.go
├── abci.go
├── autocli.go
├── depinject.go
└── README.md
```
- `client/`: The module's CLI client functionality implementation and the module's CLI testing suite.
- `exported/`: The module's exported types - typically interface types. If a module relies on keepers from another module, it is expected to receive the keepers as interface contracts through the `expected_keepers.go` file (see below) in order to avoid a direct dependency on the module implementing the keepers. However, these interface contracts can define methods that operate on and/or return types that are specific to the module that is implementing the keepers and this is where `exported/` comes into play. The interface types that are defined in `exported/` use canonical types, allowing for the module to receive the keepers as interface contracts through the `expected_keepers.go` file. This pattern allows for code to remain DRY and also alleviates import cycle chaos.
- `keeper/`: The module's `Keeper` and `MsgServer` implementation.
- `abci.go`: The module's `BeginBlocker` and `EndBlocker` implementations (this file is only required if `BeginBlocker` and/or `EndBlocker` need to be defined).
- `simulation/`: The module's [simulation](./14-simulator.md) package defines functions used by the blockchain simulator application (`simapp`).
- `README.md`: The module's specification documents outlining important concepts, state storage structure, and message and event type definitions. Learn more about how to write module specs in the [spec guidelines](../spec/SPEC_MODULE.md).
- `types/`: includes type definitions for messages, events, and genesis state, including the type definitions generated by Protocol Buffers.
- `codec.go`: The module's registry methods for interface types.
- `errors.go`: The module's sentinel errors.
- `events.go`: The module's event types and constructors.
- `expected_keepers.go`: The module's [expected keeper](./06-keeper.md#type-definition) interfaces.
- `genesis.go`: The module's genesis state methods and helper functions.
- `keys.go`: The module's store keys and associated helper functions.
- `msgs.go`: The module's message type definitions and associated methods.
- `params.go`: The module's parameter type definitions and associated methods.
- `*.pb.go`: The module's type definitions generated by Protocol Buffers (as defined in the respective `*.proto` files above).
- The root directory includes the module's `AppModule` implementation.
- `autocli.go`: The module [autocli](https://docs.cosmos.network/main/core/autocli) options.
- `depinject.go`: The module [depinject](./15-depinject.md#type-definition) options.
> Note: although the above pattern is followed by most of the Cosmos SDK modules, there are some modules that don't follow this pattern, e.g. `x/group` and `x/nft` don't have a `types` folder; instead, all of the type definitions for messages, events, and genesis state live in the root directory, and the module's `AppModule` implementation lives in the `module` folder.
# `Msg` Services
:::note Synopsis
A Protobuf `Msg` service processes [messages](./02-messages-and-queries.md#messages). Protobuf `Msg` services are specific to the module in which they are defined, and only process messages defined within the said module. They are called from `BaseApp` during [`FinalizeBlock`](../../learn/advanced/00-baseapp.md#finalizeblock).
:::
:::note Pre-requisite Readings
- [Module Manager](./01-module-manager.md)
- [Messages and Queries](./02-messages-and-queries.md)
:::
## Implementation of a module `Msg` service
Each module should define a Protobuf `Msg` service, which will be responsible for processing requests (implementing `sdk.Msg`) and returning responses.
As further described in [ADR 031](../architecture/adr-031-msg-service.md), this approach has the advantage of clearly specifying return types and generating server and client code.
Protobuf generates a `MsgServer` interface based on the definition of the `Msg` service. It is the role of the module developer to implement this interface, by implementing the state transition logic that should happen upon receipt of each `transaction.Msg`. As an example, here is the generated `MsgServer` interface for `x/bank`, which exposes two `transaction.Msg`s:
```go reference
https://github.com/cosmos/cosmos-sdk/blob/28fa3b8/x/bank/types/tx.pb.go#L564-L579
```
When possible, the existing module's [`Keeper`](./06-keeper.md) should implement `MsgServer`; otherwise, a `msgServer` struct that embeds the `Keeper` can be created, typically in `./keeper/msg_server.go`:
```go reference
https://github.com/cosmos/cosmos-sdk/blob/28fa3b8/x/bank/keeper/msg_server.go#L16-L19
```
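Stripped to its essentials, that embedding pattern looks like this (a simplified sketch of the referenced bank code):
```go
type msgServer struct {
    Keeper
}

// NewMsgServerImpl returns an implementation of the module's MsgServer
// interface for the provided Keeper.
func NewMsgServerImpl(keeper Keeper) types.MsgServer {
    return &msgServer{Keeper: keeper}
}
```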
`msgServer` methods can retrieve auxiliary information or services via the `Environment`, which is always located in the keeper:
Environment:
```go reference
https://github.com/cosmos/cosmos-sdk/blob/07151304e2ec6a185243d083f59a2d543253cb15/core/appmodule/v2/environment.go#L14-L29
```
Keeper Example:
```go reference
https://github.com/cosmos/cosmos-sdk/blob/07151304e2ec6a185243d083f59a2d543253cb15/x/bank/keeper/keeper.go#L56-L58
```
`transaction.Msg` processing usually follows these 3 steps:
### Validation
The message server must perform all validation required (both _stateful_ and _stateless_) to make sure the `message` is valid.
The `signer` is charged for the gas cost of this validation.
For example, a `msgServer` method for a `transfer` message should check that the sending account has enough funds to actually perform the transfer.
It is recommended to implement all validation checks in a separate function that passes state values as arguments. This implementation simplifies testing. As expected, expensive validation functions charge additional gas. Example:
```go
func ValidateMsgA(msg MsgA, now time.Time, gm GasMeter) error {
    if now.After(msg.Expire) {
        return sdkerrors.ErrInvalidRequest.Wrap("msg expired")
    }
    gm.ConsumeGas(1000, "signature verification")
    return signatureVerification(msg.Prover, msg.Data)
}
```
:::warning
Previously, the `ValidateBasic` method was used to perform simple and stateless validation checks.
This way of validating is deprecated, which means the `msgServer` must now perform all validation checks.
:::
### State Transition
After the validation is successful, the `msgServer` method uses the [`keeper`](./06-keeper.md) functions to access the state and perform a state transition.
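For example, a hypothetical `Send` handler combining the validation and state-transition steps could look like the sketch below; the message type and the keeper methods are illustrative placeholders.
```go
func (s msgServer) Send(ctx context.Context, msg *types.MsgSend) (*types.MsgSendResponse, error) {
    // Validation: ensure the sender can afford the transfer (hypothetical method).
    if err := s.Keeper.CheckBalance(ctx, msg.FromAddress, msg.Amount); err != nil {
        return nil, err
    }
    // State transition: move the funds via keeper functions (hypothetical method).
    if err := s.Keeper.MoveCoins(ctx, msg.FromAddress, msg.ToAddress, msg.Amount); err != nil {
        return nil, err
    }
    return &types.MsgSendResponse{}, nil
}
```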
### Events
Before returning, `msgServer` methods generally emit one or more [events](../../learn/advanced/08-events.md) by using the `EventManager` held in `environment`.
There are two ways to emit events, typed events using protobuf or arbitrary key & values.
Typed Events:
```go
ctx.EventManager().EmitTypedEvent(
    &group.EventABC{Key1: Value1, Key2: Value2},
)
```
Arbitrary Events:
```go
ctx.EventManager().EmitEvent(
    sdk.NewEvent(
        eventType, // e.g. sdk.EventTypeMessage for a message, types.CustomEventType for a custom event defined in the module
        sdk.NewAttribute(key1, value1),
        sdk.NewAttribute(key2, value2),
    ),
)
```
These events are relayed back to the underlying consensus engine and can be used by service providers to implement services around the application. Click [here](../../learn/advanced/08-events.md) to learn more about events.
The invoked `msgServer` method returns a `proto.Message` response and an `error`. These return values are then wrapped into an `*sdk.Result` or an `error`:
```go reference
https://github.com/cosmos/cosmos-sdk/blob/v0.50.0-alpha.0/baseapp/msg_service_router.go#L160
```
This method takes care of marshaling the `res` parameter to protobuf and attaching any events on the `EventManager()` to the `sdk.Result`.
```protobuf reference
https://github.com/cosmos/cosmos-sdk/blob/v0.50.0-alpha.0/proto/cosmos/base/abci/v1beta1/abci.proto#L93-L113
```
This diagram shows a typical structure of a Protobuf `Msg` service, and how the message propagates through the module.
```mermaid
sequenceDiagram
    participant User
    participant baseApp
    participant router
    participant handler
    participant msgServer
    participant keeper
    participant EventManager

    User->>baseApp: Transaction Type<Tx>
    baseApp->>router: Route(ctx, msgRoute)
    router->>handler: handler
    handler->>msgServer: Msg<Tx>(Context, Msg(..))

    alt addresses invalid, denominations wrong, etc.
        msgServer->>handler: error
        handler->>router: error
        router->>baseApp: result, error code
    else
        msgServer->>keeper: perform action, update context
        keeper->>msgServer: results, error code
        msgServer->>EventManager: Emit relevant events
        msgServer->>msgServer: maybe wrap results in more structure
        msgServer->>handler: result, error code
        handler->>router: result, error code
        router->>baseApp: result, error code
    end

    baseApp->>User: result, error code
```
## Telemetry
New [telemetry metrics](../../learn/advanced/09-telemetry.md) can be created from `msgServer` methods when handling messages.
This is an example from the `x/auth/vesting` module:
```go reference
https://github.com/cosmos/cosmos-sdk/blob/v0.50.0-alpha.0/x/auth/vesting/msg_server.go#L76-L88
```
:::warning
Telemetry adds a performance overhead to the chain. It is recommended to only use this in critical paths.
:::
# Query Services
:::note Synopsis
A Protobuf Query service processes [`queries`](./02-messages-and-queries.md#queries). Query services are specific to the module in which they are defined, and only process `queries` defined within said module. They are called from `BaseApp`'s [`Query` method](../../learn/advanced/00-baseapp.md#query).
:::
:::note Pre-requisite Readings
- [Module Manager](./01-module-manager.md)
- [Messages and Queries](./02-messages-and-queries.md)
:::
## Implementation of a module query service
### gRPC Service
When defining a Protobuf `Query` service, a `QueryServer` interface is generated for each module with all the service methods:
```go
type QueryServer interface {
    QueryBalance(context.Context, *QueryBalanceParams) (*types.Coin, error)
    QueryAllBalances(context.Context, *QueryAllBalancesParams) (*QueryAllBalancesResponse, error)
}
```
These custom query methods should be implemented by a module's keeper, typically in `./keeper/grpc_query.go`. The first parameter of these methods is a generic `context.Context`; the Cosmos SDK provides the function `sdk.UnwrapSDKContext` to retrieve the `sdk.Context` from it.
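In practice, such a method unwraps the context before touching state. A minimal sketch, with a hypothetical `GetBalance` keeper method:
```go
func (k Keeper) QueryBalance(goCtx context.Context, req *QueryBalanceParams) (*types.Coin, error) {
    ctx := sdk.UnwrapSDKContext(goCtx)        // recover the sdk.Context
    balance := k.GetBalance(ctx, req.Address) // hypothetical keeper lookup
    return &balance, nil
}
```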
Here's an example implementation for the bank module:
```go reference
https://github.com/cosmos/cosmos-sdk/blob/v0.50.0-alpha.0/x/bank/keeper/grpc_query.go
```
### Calling queries from the State Machine
The Cosmos SDK v0.47 introduces a new `cosmos.query.v1.module_query_safe` Protobuf annotation, which is used to state that a query is safe to be called from within the state machine. For example:
- a Keeper's query function can be called from another module's Keeper,
- ADR-033 intermodule query calls,
- CosmWasm contracts can also directly interact with these queries.
If the `module_query_safe` annotation is set to `true`, it means:
- The query is deterministic: given a block height it will return the same response upon multiple calls, and doesn't introduce any state-machine breaking changes across SDK patch versions.
- Gas consumption never fluctuates across calls and across patch versions.
If you are a module developer and want to use the `module_query_safe` annotation for your own query, you have to ensure the following:
- the query is deterministic and won't introduce state-machine-breaking changes without coordinated upgrades
- it has its gas tracked, to avoid the attack vector where no gas is accounted for on potentially high-computation queries
# Blockchain Architecture
## Introduction
Blockchain architecture is a complex topic that involves many different components. In this section, we will cover the main layers of a blockchain application built with the Cosmos SDK.
At its core, a blockchain is a replicated deterministic state machine. This document explores the various layers of blockchain architecture, focusing on the execution, settlement, consensus, data availability, and interoperability layers.
```mermaid
graph TD
A[Modular SDK Blockchain Architecture]
A --> B[Execution Layer]
A --> C[Settlement Layer]
A --> D[Consensus Layer]
D --> E[Data Availability Layer]
A --> F[Interoperability Layer]
```
## Layered Architecture
Understanding blockchain architecture through the lens of different layers helps in comprehending its complex functionalities. We will give a high-level overview of the execution layer, settlement layer, consensus layer, data availability layer, and interoperability layer.
## Execution Layer
The Execution Layer is where the blockchain processes and executes transactions. The state machine within the blockchain handles the execution of transaction logic. This is done by the blockchain itself, ensuring that every transaction follows the predefined rules and state transitions. When a transaction is submitted, the execution layer processes it, updates the state, and ensures that the output is deterministic and consistent across all nodes. In the context of the Cosmos SDK, this typically involves predefined modules and transaction types rather than general-purpose smart contracts, which are used in chains with CosmWasm.
### State machine
At its core, a blockchain is a [replicated deterministic state machine](https://en.wikipedia.org/wiki/State_machine_replication).
A state machine is a computer science concept whereby a machine can have multiple states, but only one at any given time. There is a `state`, which describes the current state of the system, and `transactions`, that trigger state transitions.
Given a state S and a transaction T, the state machine will return a new state S'.
```mermaid
flowchart LR
A[S]
B[S']
A -->|"apply(T)"| B
```
In practice, the transactions are bundled in blocks to make the process more efficient. Given a state S and a block of transactions B, the state machine will return a new state S'.
```mermaid
flowchart LR
A[S]
B[S']
A -->|"For each T in B: apply(T)"| B
```
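Expressed as code, the two diagrams above amount to the following sketch, with placeholder `State`, `Transaction`, and `Block` types:
```go
type State struct{ /* application-defined */ }
type Transaction struct{ /* application-defined */ }
type Block struct{ Txs []Transaction }

// applyTx is the state-transition function: it is deterministic, so the
// same state and transaction always yield the same new state.
func applyTx(s State, t Transaction) State {
    // application-defined transition logic
    return s
}

// applyBlock folds applyTx over every transaction in the block.
func applyBlock(s State, b Block) State {
    for _, tx := range b.Txs {
        s = applyTx(s, tx)
    }
    return s
}
```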
In a blockchain context, the state machine is [deterministic](https://en.wikipedia.org/wiki/Deterministic_system). This means that if a node is started at a given state and replays the same sequence of transactions, it will always end up with the same final state.
The Cosmos SDK gives developers maximum flexibility to define the state of their application, transaction types and state transition functions. The process of building state machines with the Cosmos SDK will be described more in-depth in the following sections. But first, let us see how the state machine is replicated using various consensus engines, such as CometBFT.
## Settlement Layer
The Settlement Layer is responsible for finalising and recording transactions on the blockchain. This layer ensures that all transactions are accurately settled and immutable, providing a verifiable record of all activities on the blockchain. It is critical for maintaining the integrity and trustworthiness of the blockchain.
Settlement can happen on the chain itself or be externalised, allowing a different settlement layer to be plugged in as needed. For example, if we were to use Rollkit and Celestia for data availability and consensus, we could separate the settlement layer by introducing fraud or validity proofs. From there, the settlement layer can create trust-minimised light clients, further enhancing security and efficiency. This process ensures that all transactions are accurately finalised and immutable, providing a verifiable record of all activities.
## Consensus Layer
The Consensus Layer ensures that all nodes in the network agree on the order and validity of transactions. This layer uses consensus algorithms like Byzantine Fault Tolerance (BFT) or Proof of Stake (PoS) to achieve agreement, even in the presence of malicious nodes. Consensus is crucial for maintaining the security and reliability of the blockchain.
CometBFT has historically been the default consensus engine in the Cosmos SDK. In the most recent releases we have been moving away from this, allowing users to plug in their own consensus engines. This is a big step forward for the Cosmos SDK, as it allows for more flexibility and customisation. One alternative consensus engine option, for example, is Rollkit with Celestia's Data Availability Layer.
Here is an example of how the consensus layer works with CometBFT in the context of the Cosmos SDK:
### CometBFT
Thanks to the Cosmos SDK, developers just have to define the state machine, and [_CometBFT_](https://docs.cometbft.com/v1.0/explanation/introduction/) will handle replication over the network for them.
```mermaid
flowchart TD
    subgraph Blockchain_Node[Blockchain Node]
        subgraph SM[State-machine]
            direction TB
            SM1[Cosmos SDK]
        end
        subgraph CometBFT[CometBFT]
            direction TB
            Consensus
            Networking
        end
    end
    SM <--> CometBFT
    Blockchain_Node -->|Includes| SM
    Blockchain_Node -->|Includes| CometBFT
```
[CometBFT](https://docs.cometbft.com/v1.0/explanation/introduction/) is an application-agnostic engine that is responsible for handling the _networking_ and _consensus_ layers of a blockchain. In practice, this means that CometBFT is responsible for propagating and ordering transaction bytes. CometBFT relies on an eponymous Byzantine-Fault-Tolerant (BFT) algorithm to reach consensus on the order of transactions.
The [consensus algorithm adopted by CometBFT](https://docs.cometbft.com/v1.0/explanation/introduction/#consensus-overview) works with a set of special nodes called _Validators_. Validators are responsible for adding blocks of transactions to the blockchain. At any given block, there is a validator set V. A validator in V is chosen by the algorithm to be the proposer of the next block. This block is considered valid if more than two thirds of V signed a `prevote` and a `precommit` on it, and if all the transactions that it contains are valid. The validator set can be changed by rules written in the state-machine.
## ABCI
CometBFT passes transactions to the application through an interface called the [ABCI](https://docs.cometbft.com/v1.0/spec/abci/), which the application must implement.
```mermaid
graph TD
A[Application]
B[CometBFT]
A <-->|ABCI| B
```
Note that **CometBFT only handles transaction bytes**. It has no knowledge of what these bytes mean. All CometBFT does is order these transaction bytes deterministically. CometBFT passes the bytes to the application via the ABCI, and expects a return code to inform it if the messages contained in the transactions were successfully processed or not.
Here are the most important messages of the ABCI:
- `CheckTx`: When a transaction is received by CometBFT, it is passed to the application to check if a few basic requirements are met. `CheckTx` is used to protect the mempool of full-nodes against spam transactions. A special handler called the [`AnteHandler`](../beginner/04-gas-fees.md#antehandler) is used to execute a series of validation steps such as checking for sufficient fees and validating the signatures. If the checks are valid, the transaction is added to the [mempool](https://docs.cometbft.com/v1.0/explanation/core/mempool) and relayed to peer nodes. Note that transactions are not processed (i.e. no modification of the state occurs) with `CheckTx` since they have not been included in a block yet.
- `DeliverTx`: When a [valid block](https://docs.cometbft.com/v1.0/spec/core/data_structures#block) is received by CometBFT, each transaction in the block is passed to the application via `DeliverTx` in order to be processed. It is during this stage that the state transitions occur. The `AnteHandler` executes again, along with the actual [`Msg` service](../../build/building-modules/03-msg-services.md) RPC for each message in the transaction.
- `BeginBlock`/`EndBlock`: These messages are executed at the beginning and the end of each block, whether the block contains transactions or not. They are useful for triggering the automatic execution of logic. Proceed with caution though, as computationally expensive loops could slow down your blockchain, or even freeze it if the loop is infinite.
Find a more detailed view of the ABCI methods from the [CometBFT docs](https://docs.cometbft.com/v1.0/spec/abci/).
Any application built on CometBFT needs to implement the ABCI interface in order to communicate with the underlying local CometBFT engine. Fortunately, you do not have to implement the ABCI interface. The Cosmos SDK provides a boilerplate implementation of it in the form of [baseapp](./03-sdk-design.md#baseapp).
## Data Availability Layer
The Data Availability (DA) Layer is a critical component under the umbrella of the consensus layer that ensures all necessary data for transactions is available to all network participants. This layer is essential for preventing data withholding attacks, where some nodes might attempt to disrupt the network by not sharing critical transaction data.
If we use the example of Rollkit, a user initiates a transaction, which is then propagated through the rollup network by a light node. The transaction is validated by full nodes and aggregated into a block by the sequencer. This block is posted to a data availability layer like Celestia, ensuring the data is accessible and correctly ordered. The rollup light node verifies data availability from the DA layer. Full nodes then validate the block and generate the necessary proofs, such as fraud proofs for optimistic rollups or zk-SNARKs/zk-STARKs for zk-rollups. These proofs are shared across the network and verified by other nodes, ensuring the rollup's integrity. Once all validations are complete, the rollup's state is updated, finalising the transaction.
## Interoperability Layer
The Interoperability Layer enables communication and interaction between different blockchains. This layer facilitates cross-chain transactions and data sharing, allowing various blockchain networks to interoperate seamlessly. Interoperability is key for building a connected ecosystem of blockchains, enhancing their functionality and reach.
In this case, we have separated the layers even further to illustrate the components that make up the blockchain architecture. It is important to note that the Cosmos SDK is designed to be interoperable with other blockchains. This is achieved through the use of the [Inter-Blockchain Communication (IBC) protocol](https://www.ibcprotocol.dev/), which allows different blockchains to communicate and transfer assets between each other.
# Application-Specific Blockchains
:::note Synopsis
This document explains what application-specific blockchains are, and why developers would want to build one as opposed to writing Smart Contracts.
:::
## What are application-specific blockchains
Application-specific blockchains are blockchains customized to operate a single application. Instead of building a decentralized application on top of an underlying blockchain like Ethereum, developers build their own blockchain from the ground up. This means building a full-node client, a light-client, and all the necessary interfaces (CLI, REST, ...) to interact with the nodes.
```mermaid
flowchart TD
    subgraph Blockchain_Node[Blockchain Node]
        subgraph SM[State-machine]
            direction TB
            SM1[Cosmos SDK]
        end
        subgraph Consensus[Consensus]
            direction TB
        end
        subgraph Networking[Networking]
            direction TB
        end
    end
    SM <--> Consensus
    Consensus <--> Networking
    Blockchain_Node -->|Includes| SM
    Blockchain_Node -->|Includes| Consensus
    Blockchain_Node -->|Includes| Networking
```
## What are the shortcomings of Smart Contracts
Virtual-machine blockchains like Ethereum addressed the demand for more programmability back in 2014. At the time, the options available for building decentralized applications were quite limited. Most developers would build on top of the complex and limited Bitcoin scripting language, or fork the Bitcoin codebase which was hard to work with and customize.
Virtual-machine blockchains came in with a new value proposition. Their state-machine incorporates a virtual-machine that is able to interpret Turing-complete programs called Smart Contracts. These Smart Contracts are very good for use cases like one-time events (e.g. ICOs), but they can fall short for building complex decentralized platforms. Here is why:
- Smart Contracts are generally developed with specific programming languages that can be interpreted by the underlying virtual-machine. These programming languages are often immature and inherently limited by the constraints of the virtual-machine itself. For example, the Ethereum Virtual Machine does not allow developers to implement automatic execution of code. Developers are also limited to the account-based system of the EVM, and they can only choose from a limited set of functions for their cryptographic operations. These are examples, but they hint at the lack of **flexibility** that a smart contract environment often entails.
- Smart Contracts are all run by the same virtual machine. This means that they compete for resources, which can severely restrain **performance**. And even if the state-machine were to be split in multiple subsets (e.g. via sharding), Smart Contracts would still need to be interpreted by a virtual machine, which would limit performance compared to a native application implemented at state-machine level (our benchmarks show an improvement on the order of 10x in performance when the virtual-machine is removed).
- Another issue with the fact that Smart Contracts share the same underlying environment is the resulting limitation in **sovereignty**. A decentralized application is an ecosystem that involves multiple players. If the application is built on a general-purpose virtual-machine blockchain, stakeholders have very limited sovereignty over their application, and are ultimately superseded by the governance of the underlying blockchain. If there is a bug in the application, very little can be done about it.
Application-Specific Blockchains are designed to address these shortcomings.
## Application-Specific Blockchains Benefits
### Flexibility
Application-specific blockchains give maximum flexibility to developers:
- In Cosmos blockchains, the state-machine is typically connected to the underlying consensus engine via an interface called the [ABCI](https://docs.cometbft.com/v1.0/spec/abci/) (Application Blockchain Interface). This interface can be wrapped in any programming language, meaning developers can build their state-machine in the programming language of their choice.
- Developers can choose among multiple frameworks to build their state-machine. The most widely used today is the Cosmos SDK, but others exist (e.g. [Lotion](https://github.com/nomic-io/lotion), [Weave](https://github.com/iov-one/weave), ...). Typically the choice will be made based on the programming language they want to use (Cosmos SDK and Weave are in Golang, Lotion is in Javascript, ...).
- The ABCI also allows developers to swap the consensus engine of their application-specific blockchain. Today, only CometBFT is production-ready, but in the future other consensus engines are expected to emerge.
- Even when they settle for a framework and consensus engine, developers still have the freedom to tweak them if they don't perfectly match their requirements in their pristine forms.
- Developers are free to explore the full spectrum of tradeoffs (e.g. number of validators vs transaction throughput, safety vs availability in asynchrony, ...) and design choices (DB or IAVL tree for storage, UTXO or account model, ...).
- Developers can implement automatic execution of code. In the Cosmos SDK, logic can be automatically triggered at the beginning and the end of each block. They are also free to choose the cryptographic library used in their application, as opposed to being constrained by what is made available by the underlying environment in the case of virtual-machine blockchains.
The list above contains a few examples that show how much flexibility application-specific blockchains give to developers. The goal of Cosmos and the Cosmos SDK is to make developer tooling as generic and composable as possible, so that each part of the stack can be forked, tweaked and improved without losing compatibility. As the community grows, more alternatives for each of the core building blocks will emerge, giving more options to developers.
### Performance
Decentralized applications built with Smart Contracts are inherently capped in performance by the underlying environment. For a decentralized application to optimise performance, it needs to be built as an application-specific blockchain. Next are some of the benefits an application-specific blockchain brings in terms of performance:
- Developers of application-specific blockchains can choose to operate with a novel consensus engine such as CometBFT.
- An application-specific blockchain only operates a single application, so that the application does not compete with others for computation and storage. This is the opposite of most non-sharded virtual-machine blockchains today, where smart contracts all compete for computation and storage.
- Even if a virtual-machine blockchain offered application-based sharding coupled with an efficient consensus algorithm, performance would still be limited by the virtual-machine itself. The real throughput bottleneck is the state-machine, and requiring transactions to be interpreted by a virtual-machine significantly increases the computational complexity of processing them.
### Security
Security is hard to quantify, and greatly varies from platform to platform. That said here are some important benefits an application-specific blockchain can bring in terms of security:
- Developers can choose proven programming languages like Go when building their application-specific blockchains, as opposed to smart contract programming languages that are often more immature.
- Developers are not constrained by the cryptographic functions made available by the underlying virtual-machines. They can use their own custom cryptography, and rely on well-audited crypto libraries.
- Developers do not have to worry about potential bugs or exploitable mechanisms in the underlying virtual-machine, making it easier to reason about the security of the application.
### Sovereignty
One of the major benefits of application-specific blockchains is sovereignty. A decentralized application is an ecosystem that involves many actors: users, developers, third-party services, and more. When developers build on virtual-machine blockchain where many decentralized applications coexist, the community of the application is different than the community of the underlying blockchain, and the latter supersedes the former in the governance process. If there is a bug or if a new feature is needed, stakeholders of the application have very little leeway to upgrade the code. If the community of the underlying blockchain refuses to act, nothing can happen.
The fundamental issue here is that the governance of the application and the governance of the network are not aligned. This issue is solved by application-specific blockchains. Because application-specific blockchains specialize to operate a single application, stakeholders of the application have full control over the entire chain. This ensures that the community will not be stuck if a bug is discovered, and that it has the freedom to choose how it is going to evolve.
# Interchain Accounts
:::note Synopsis
Learn about what the Interchain Accounts module is
:::
## What is the Interchain Accounts module?
Interchain Accounts is the Cosmos SDK implementation of the ICS-27 protocol, which enables cross-chain account management built upon IBC.
- How does an interchain account differ from a regular account?
Regular accounts use a private key to sign transactions. Interchain Accounts are instead controlled programmatically by counterparty chains via IBC packets.
## Concepts
`Host Chain`: The chain where the interchain account is registered. The host chain listens for IBC packets from a controller chain that contain instructions (e.g. Cosmos SDK messages) for the interchain account to execute.
`Controller Chain`: The chain registering and controlling an account on a host chain. The controller chain sends IBC packets to the host chain to control the account.
`Interchain Account`: An account on a host chain created using the ICS-27 protocol. An interchain account has all the capabilities of a normal account. However, rather than signing transactions with a private key, a controller chain will send IBC packets to the host chain which signals what transactions the interchain account should execute.
`Authentication Module`: A custom application module on the controller chain that uses the Interchain Accounts module to build custom logic for the creation & management of interchain accounts. It can be either an IBC application module using the [legacy API](10-legacy/03-keeper-api.md), or a regular Cosmos SDK application module sending messages to the controller submodule's `MsgServer` (this is the recommended approach from ibc-go v6 if access to packet callbacks is not needed). Please note that the legacy API will eventually be removed and IBC applications will not be able to use them in later releases.
## SDK security model
SDK modules on a chain are assumed to be trustworthy. For example, there are no checks to prevent an untrustworthy module from accessing the bank keeper.
The implementation of ICS-27 in ibc-go uses this assumption in its security considerations.
The implementation assumes other IBC application modules will not bind to ports within the ICS-27 namespace.
## Channel Closure
The provided interchain account host and controller implementations do not support `ChanCloseInit`. However, they do support `ChanCloseConfirm`.
This means that the host and controller modules cannot close channels, but they will confirm channel closures initiated by other implementations of ICS-27.
In the event of a channel closing (due to a packet timeout in an ordered channel, for example), the interchain account associated with that channel can become accessible again if a new channel is created with a (JSON-formatted) version string that encodes the exact same `Metadata` information of the previous channel. The channel can be reopened using either [`MsgRegisterInterchainAccount`](./05-messages.md#msgregisterinterchainaccount) or `MsgChannelOpenInit`. If `MsgRegisterInterchainAccount` is used, then it is possible to leave the `version` field of the message empty, since it will be filled in by the controller submodule. If `MsgChannelOpenInit` is used, then the `version` field must be provided with the correct JSON-encoded `Metadata` string. See section [Understanding Active Channels](./09-active-channels.md#understanding-active-channels) for more information.
When reopening a channel with the default controller submodule, the ordering of the channel cannot be changed. In order to change the ordering of the channel, the channel has to go through a [channel upgrade handshake](../../01-ibc/06-channel-upgrades.md) or reopen the channel with a custom controller implementation.
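For reference, the JSON-encoded `Metadata` version string described above can be sketched from ibc-go's `icatypes.Metadata` type. This is a hedged illustration: the connection identifiers and account address are placeholders, and ibc-go itself marshals the metadata with the module codec, so plain `encoding/json` over the generated struct is used here only to show the shape of the string.
```go
package main

import (
	"encoding/json"
	"fmt"

	icatypes "github.com/cosmos/ibc-go/v8/modules/apps/27-interchain-accounts/types"
)

func main() {
	// The version string must encode the exact same Metadata as the previous
	// channel for the interchain account to become accessible again.
	md := icatypes.Metadata{
		Version:                "ics27-1",
		ControllerConnectionId: "connection-0", // placeholder
		HostConnectionId:       "connection-0", // placeholder
		Address:                "cosmos1...",   // existing interchain account address
		Encoding:               "proto3",
		TxType:                 "sdk_multi_msg",
	}
	bz, err := json.Marshal(&md)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(bz)) // pass this as the version in MsgChannelOpenInit
}
```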

View File

@ -0,0 +1,310 @@
---
title: Overview
---
# Overview
:::note Synopsis
Learn about what the Fee Middleware module is, and how to build custom modules that utilize the Fee Middleware functionality
:::
## What is the Fee Middleware module?
IBC does not depend on relayer operators for transaction verification. However, the relayer infrastructure ensures liveness of the Interchain network — operators listen for packets sent through channels opened between chains, and perform the vital service of ferrying these packets (and proof of the transaction on the sending chain/receipt on the receiving chain) to the clients on each side of the channel.
Though relaying is permissionless, completely decentralized, and accessible, it does come with operational costs. Running full nodes to query transaction proofs and paying for transaction fees associated with IBC packets are two of the primary cost burdens which have driven the overall discussion on **a general, in-protocol incentivization mechanism for relayers**.
Initially, a [simple proposal](https://github.com/cosmos/ibc/pull/577/files) was created to incentivize relaying on ICS20 token transfers on the destination chain. However, the proposal was specific to ICS20 token transfers and would have to be reimplemented in this format on every other IBC application module.
After much discussion, the proposal was expanded to a [general incentivisation design](https://github.com/cosmos/ibc/tree/master/spec/app/ics-029-fee-payment) that can be adopted by any ICS application protocol as [middleware](../../01-ibc/04-middleware/02-develop.md).
## Concepts
ICS29 fee payments in this middleware design are built on the assumption that sender chains are the source of incentives — the chain on which packets are incentivized is the chain that distributes fees to relayer operators. However, as part of the IBC packet flow, messages have to be submitted on both sender and destination chains. This introduces the requirement of a mapping of relayer operators' addresses on both chains.
To achieve the stated requirements, the **fee middleware module has two main groups of functionality**:
- Registering relayer addresses associated with each party involved in relaying the packet on the source chain. This registration process can be automated on start-up of the relayer infrastructure and happens only once, not for every packet flow.
This is described in the [Fee distribution section](04-fee-distribution.md).
- Escrowing fees by any party which will be paid out to each rightful party on completion of the packet lifecycle.
This is described in the [Fee messages section](03-msgs.md).
We complete the introduction by giving a list of definitions of relevant terminology.
`Forward relayer`: The relayer that submits the `MsgRecvPacket` message for a given packet (on the destination chain).
`Reverse relayer`: The relayer that submits the `MsgAcknowledgement` message for a given packet (on the source chain).
`Timeout relayer`: The relayer that submits the `MsgTimeout` or `MsgTimeoutOnClose` messages for a given packet (on the source chain).
`Payee`: The account address on the source chain to be paid on completion of the packet lifecycle. The packet lifecycle on the source chain completes with the receipt of a `MsgTimeout`/`MsgTimeoutOnClose` or a `MsgAcknowledgement`.
`Counterparty payee`: The account address to be paid on completion of the packet lifecycle on the destination chain. The packet lifecycle on the destination chain completes with a successful `MsgRecvPacket`.
`Refund address`: The address of the account paying for the incentivization of packet relaying. The account is refunded timeout fees upon successful acknowledgement. In the event of a packet timeout, both acknowledgement and receive fees are refunded.
## Known Limitations
- At the time of the release of the feature (ibc-go v4), the fee payments middleware only supported incentivisation of new channels; however, with the release of channel upgradeability (ibc-go v8.1) it is possible to enable incentivisation of all existing channels.
- Even though unlikely, there exists a DoS attack vector on a fee-enabled channel if 1) there exists a relayer software implementation that is incentivised to timeout packets if the timeout fee is greater than the sum of the fees to receive and acknowledge the packet, and 2) only this type of implementation is used by operators relaying on the channel. In this situation, an attacker could continuously incentivise the relayers to never deliver the packets by incrementing the timeout fee of the packets above the sum of the receive and acknowledge fees. However, this situation is unlikely to occur because 1) another relayer behaving honestly could relay the packets before they time out, and 2) the attack would be costly because the attacker would need to fund the timeout fees of the packets themselves. Given the low impact and unlikelihood of the attack, we have decided to accept this risk and not implement any mitigation measures.
## Module Integration
The Fee Middleware module, as the name suggests, plays the role of an IBC middleware and as such must be configured by chain developers to route and handle IBC messages correctly.
For Cosmos SDK chains this setup is done via the `app/app.go` file, where modules are constructed and configured in order to bootstrap the blockchain application.
## Example integration of the Fee Middleware module
```go
// app.go
// Register the AppModule for the fee middleware module
ModuleBasics = module.NewBasicManager(
...
ibcfee.AppModuleBasic{},
...
)
...
// Add module account permissions for the fee middleware module
maccPerms = map[string][]string{
...
ibcfeetypes.ModuleName: nil,
}
...
// Add fee middleware Keeper
type App struct {
...
IBCFeeKeeper ibcfeekeeper.Keeper
...
}
...
// Create store keys
keys := sdk.NewKVStoreKeys(
...
ibcfeetypes.StoreKey,
...
)
...
app.IBCFeeKeeper = ibcfeekeeper.NewKeeper(
appCodec, keys[ibcfeetypes.StoreKey],
app.IBCKeeper.ChannelKeeper, // may be replaced with IBC middleware
app.IBCKeeper.ChannelKeeper,
&app.IBCKeeper.PortKeeper, app.AccountKeeper, app.BankKeeper,
)
// See the section below for configuring an application stack with the fee middleware module
...
// Register fee middleware AppModule
app.moduleManager = module.NewManager(
...
ibcfee.NewAppModule(app.IBCFeeKeeper),
)
...
// Add fee middleware to begin blocker logic
app.moduleManager.SetOrderBeginBlockers(
...
ibcfeetypes.ModuleName,
...
)
// Add fee middleware to end blocker logic
app.moduleManager.SetOrderEndBlockers(
...
ibcfeetypes.ModuleName,
...
)
// Add fee middleware to init genesis logic
app.moduleManager.SetOrderInitGenesis(
...
ibcfeetypes.ModuleName,
...
)
```
## Configuring an application stack with Fee Middleware
As mentioned in [IBC middleware development](../../01-ibc/04-middleware/02-develop.md) an application stack may be composed of many or no middlewares that nest a base application.
These layers form the complete set of application logic that enable developers to build composable and flexible IBC application stacks.
For example, an application stack may be just a single base application like `transfer`; however, the same application stack composed with `29-fee` will nest the `transfer` base application
by wrapping it with the Fee Middleware module.
### Transfer
See below for an example of how to create an application stack using `transfer` and `29-fee`.
The following `transferStack` is configured in `app/app.go` and added to the IBC `Router`.
The in-line comments describe the execution flow of packets between the application stack and IBC core.
```go
// Create Transfer Stack
// SendPacket, since it is originating from the application to core IBC:
// transferKeeper.SendPacket -> fee.SendPacket -> channel.SendPacket
// RecvPacket, message that originates from core IBC and goes down to app, the flow is the other way
// channel.RecvPacket -> fee.OnRecvPacket -> transfer.OnRecvPacket
// transfer stack contains (from top to bottom):
// - IBC Fee Middleware
// - Transfer
// create IBC module from bottom to top of stack
var transferStack porttypes.IBCModule
transferStack = transfer.NewIBCModule(app.TransferKeeper)
transferStack = ibcfee.NewIBCMiddleware(transferStack, app.IBCFeeKeeper)
// Add transfer stack to IBC Router
ibcRouter.AddRoute(ibctransfertypes.ModuleName, transferStack)
```
### Interchain Accounts
See below for an example of how to create an application stack using `27-interchain-accounts` and `29-fee`.
The following `icaControllerStack` and `icaHostStack` are configured in `app/app.go` and added to the IBC `Router` with the associated authentication module.
The in-line comments describe the execution flow of packets between the application stack and IBC core.
```go
// Create Interchain Accounts Stack
// SendPacket, since it is originating from the application to core IBC:
// icaAuthModuleKeeper.SendTx -> icaController.SendPacket -> fee.SendPacket -> channel.SendPacket
// initialize ICA module with mock module as the authentication module on the controller side
var icaControllerStack porttypes.IBCModule
icaControllerStack = ibcmock.NewIBCModule(&mockModule, ibcmock.NewMockIBCApp("", scopedICAMockKeeper))
app.ICAAuthModule = icaControllerStack.(ibcmock.IBCModule)
icaControllerStack = icacontroller.NewIBCMiddleware(icaControllerStack, app.ICAControllerKeeper)
icaControllerStack = ibcfee.NewIBCMiddleware(icaControllerStack, app.IBCFeeKeeper)
// RecvPacket, message that originates from core IBC and goes down to app, the flow is:
// channel.RecvPacket -> fee.OnRecvPacket -> icaHost.OnRecvPacket
var icaHostStack porttypes.IBCModule
icaHostStack = icahost.NewIBCModule(app.ICAHostKeeper)
icaHostStack = ibcfee.NewIBCMiddleware(icaHostStack, app.IBCFeeKeeper)
// Add authentication module, controller and host to IBC router
ibcRouter.
// the ICA Controller middleware needs to be explicitly added to the IBC Router because the
// ICA controller module owns the port capability for ICA. The ICA authentication module
// owns the channel capability.
AddRoute(ibcmock.ModuleName+icacontrollertypes.SubModuleName, icaControllerStack). // ica with mock auth module stack route to ica (top level of middleware stack)
AddRoute(icacontrollertypes.SubModuleName, icaControllerStack).
AddRoute(icahosttypes.SubModuleName, icaHostStack)
```
## Fee Distribution
Packet fees are divided into three distinct amounts in order to compensate relayer operators for packet relaying on fee-enabled IBC channels (a construction sketch follows this list).
- `RecvFee`: The sum of all packet receive fees distributed to a payee for successful execution of `MsgRecvPacket`.
- `AckFee`: The sum of all packet acknowledgement fees distributed to a payee for successful execution of `MsgAcknowledgement`.
- `TimeoutFee`: The sum of all packet timeout fees distributed to a payee for successful execution of `MsgTimeout`.
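The sketch below builds an ICS-29 `MsgPayPacketFee` carrying the three amounts for the next packet sent on a channel. The denomination, amounts, and signer are placeholders, and the import paths assume recent ibc-go and Cosmos SDK releases.
```go
package main

import (
	"fmt"

	sdkmath "cosmossdk.io/math"
	sdk "github.com/cosmos/cosmos-sdk/types"
	ibcfeetypes "github.com/cosmos/ibc-go/v8/modules/apps/29-fee/types"
)

func main() {
	// The escrowed amount is the sum of the three fees; unused portions are
	// refunded according to how the packet lifecycle completes.
	fee := ibcfeetypes.Fee{
		RecvFee:    sdk.NewCoins(sdk.NewCoin("stake", sdkmath.NewInt(100))),
		AckFee:     sdk.NewCoins(sdk.NewCoin("stake", sdkmath.NewInt(50))),
		TimeoutFee: sdk.NewCoins(sdk.NewCoin("stake", sdkmath.NewInt(75))),
	}
	msg := ibcfeetypes.NewMsgPayPacketFee(
		fee,
		"transfer",   // source port
		"channel-0",  // source channel
		"cosmos1...", // signer (refund address), placeholder
		nil,          // optional relayer allow-list, unused here
	)
	fmt.Println(msg.String())
}
```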
## Register a counterparty payee address for forward relaying
As mentioned in [ICS29 Concepts](01-overview.md#concepts), the forward relayer describes the actor who performs the submission of `MsgRecvPacket` on the destination chain.
Fee distribution for incentivized packet relays takes place on the packet source chain.
> Relayer operators are expected to register a counterparty payee address, in order to be compensated accordingly with `RecvFee`s upon completion of a packet lifecycle.
The counterparty payee address registered on the destination chain is encoded into the packet acknowledgement and communicated as such to the source chain for fee distribution.
**If a counterparty payee is not registered for the forward relayer on the destination chain, the escrowed fees will be refunded upon fee distribution.**
### Relayer operator actions
A transaction must be submitted **to the destination chain** including a `CounterpartyPayee` address of an account on the source chain.
The transaction must be signed by the `Relayer`.
Note: If a module account address is used as the `CounterpartyPayee` but the module has been set as a blocked address in the `BankKeeper`, the refunding to the module account will fail. This is because many modules use invariants to compare internal tracking of module account balances against the actual balance of the account stored in the `BankKeeper`. If a token transfer to the module account occurs without going through this module and updating the account balance of the module on the `BankKeeper`, then invariants may break and unknown behaviour could occur depending on the module implementation. Therefore, if it is desirable to use a module account that is currently blocked, the module developers should be consulted to gauge the possibility of removing the module account from the blocked list.
```go
type MsgRegisterCounterpartyPayee struct {
// unique port identifier
PortId string
// unique channel identifier
ChannelId string
// the relayer address
Relayer string
// the counterparty payee address
CounterpartyPayee string
}
```
> This message is expected to fail if:
>
> - `PortId` is invalid (see [24-host naming requirements](https://github.com/cosmos/ibc/blob/master/spec/core/ics-024-host-requirements/README.md#paths-identifiers-separators)).
> - `ChannelId` is invalid (see [24-host naming requirements](https://github.com/cosmos/ibc/blob/master/spec/core/ics-024-host-requirements/README.md#paths-identifiers-separators)).
> - `Relayer` is an invalid address (see [Cosmos SDK Addresses](https://github.com/cosmos/cosmos-sdk/blob/main/docs/learn/beginner/03-accounts.md#addresses)).
> - `CounterpartyPayee` is empty or contains more than 2048 bytes.
See below for an example CLI command:
```bash
simd tx ibc-fee register-counterparty-payee transfer channel-0 \
cosmos1rsp837a4kvtgp2m4uqzdge0zzu6efqgucm0qdh \
osmo1v5y0tz01llxzf4c2afml8s3awue0ymju22wxx2 \
--from cosmos1rsp837a4kvtgp2m4uqzdge0zzu6efqgucm0qdh
```
## Register an alternative payee address for reverse and timeout relaying
As mentioned in [ICS29 Concepts](01-overview.md#concepts), the reverse relayer describes the actor who performs the submission of `MsgAcknowledgement` on the source chain.
Similarly the timeout relayer describes the actor who performs the submission of `MsgTimeout` (or `MsgTimeoutOnClose`) on the source chain.
> Relayer operators **may choose** to register an optional payee address, in order to be compensated accordingly with `AckFee`s and `TimeoutFee`s upon completion of a packet lifecycle.
If a payee is not registered for the reverse or timeout relayer on the source chain, then fee distribution assumes the default behaviour, where fees are paid out to the relayer account which delivers `MsgAcknowledgement` or `MsgTimeout`/`MsgTimeoutOnClose`.
### Relayer operator actions
A transaction must be submitted **to the source chain** including a `Payee` address of an account on the source chain.
The transaction must be signed by the `Relayer`.
Note: If a module account address is used as the `Payee` it is recommended to [turn off invariant checks](https://github.com/cosmos/ibc-go/blob/v7.0.0/testing/simapp/app.go#L727) for that module.
```go
type MsgRegisterPayee struct {
// unique port identifier
PortId string
// unique channel identifier
ChannelId string
// the relayer address
Relayer string
// the payee address
Payee string
}
```
> This message is expected to fail if:
>
> - `PortId` is invalid (see [24-host naming requirements](https://github.com/cosmos/ibc/blob/master/spec/core/ics-024-host-requirements/README.md#paths-identifiers-separators)).
> - `ChannelId` is invalid (see [24-host naming requirements](https://github.com/cosmos/ibc/blob/master/spec/core/ics-024-host-requirements/README.md#paths-identifiers-separators)).
> - `Relayer` is an invalid address (see [Cosmos SDK Addresses](https://github.com/cosmos/cosmos-sdk/blob/main/docs/learn/beginner/03-accounts.md#addresses)).
> - `Payee` is an invalid address (see [Cosmos SDK Addresses](https://github.com/cosmos/cosmos-sdk/blob/main/docs/learn/beginner/03-accounts.md#addresses)).
See below for an example CLI command:
```bash
simd tx ibc-fee register-payee transfer channel-0 \
cosmos1rsp837a4kvtgp2m4uqzdge0zzu6efqgucm0qdh \
cosmos153lf4zntqt33a4v0sm5cytrxyqn78q7kz8j8x5 \
--from cosmos1rsp837a4kvtgp2m4uqzdge0zzu6efqgucm0qdh
```

View File

@ -0,0 +1,178 @@
---
title: Overview
---
# Overview
:::note Synopsis
Learn about what the token Transfer module is
:::
## What is the Transfer module?
Transfer is the Cosmos SDK implementation of the [ICS-20](https://github.com/cosmos/ibc/tree/master/spec/app/ics-020-fungible-token-transfer) protocol, which enables cross-chain fungible token transfers.
## Concepts
### Acknowledgements
ICS20 uses the recommended acknowledgement format as specified by [ICS 04](https://github.com/cosmos/ibc/tree/master/spec/core/ics-004-channel-and-packet-semantics#acknowledgement-envelope).
A successful receive of a transfer packet will result in a Result Acknowledgement being written
with the value `[]byte{byte(1)}` in the `Response` field.
An unsuccessful receive of a transfer packet will result in an Error Acknowledgement being written
with the error message in the `Response` field.
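A short sketch of constructing acknowledgements in this format with ibc-go's channel types (the import path assumes a recent ibc-go release):
```go
package main

import (
	"errors"
	"fmt"

	channeltypes "github.com/cosmos/ibc-go/v8/modules/core/04-channel/types"
)

func main() {
	// Successful receive: Result acknowledgement carrying []byte{byte(1)}.
	success := channeltypes.NewResultAcknowledgement([]byte{byte(1)})

	// Failed receive: Error acknowledgement; recent ibc-go versions redact the
	// message to a deterministic ABCI code string before it is written.
	failure := channeltypes.NewErrorAcknowledgement(errors.New("insufficient funds"))

	// Acknowledgement() returns the JSON bytes written to state.
	fmt.Println(string(success.Acknowledgement()))
	fmt.Println(string(failure.Acknowledgement()))
}
```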
### Denomination trace
The denomination trace corresponds to the information that allows a token to be traced back to its
origin chain. It contains a sequence of port and channel identifiers ordered from the most recent to
the oldest in the timeline of transfers.
This information is included on the token's base denomination field in the form of a hash to prevent an
unbounded denomination length. For example, the token `transfer/channelToA/uatom` will be displayed
as `ibc/7F1D3FCF4AE79E1554D670D1AD949A9BA4E4A3C76C63093E17E446A46061A7A2`. The human readable denomination
is stored using `x/bank` module's [denom metadata](https://docs.cosmos.network/main/build/modules/bank#denom-metadata)
feature. You may display the human readable denominations by querying balances with the `--resolve-denom` flag, as in:
```shell
simd query bank balances [address] --resolve-denom
```
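The hash inside the `ibc/...` denomination is the SHA-256 of the full trace path joined with the base denomination, rendered as uppercase hex. A minimal sketch of that derivation, mirroring the example above:
```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"strings"
)

// ibcDenom derives the on-chain denomination from a trace path and base denom,
// mirroring how the transfer module hashes `transfer/channelToA/uatom`.
func ibcDenom(tracePath, baseDenom string) string {
	sum := sha256.Sum256([]byte(tracePath + "/" + baseDenom))
	return "ibc/" + strings.ToUpper(hex.EncodeToString(sum[:]))
}

func main() {
	fmt.Println(ibcDenom("transfer/channelToA", "uatom"))
	// prints the ibc/7F1D... hash shown above
}
```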
Each send to any chain other than the one it was previously received from is a movement forwards in
the token's timeline. This causes a trace to be added to the token's history and the destination port
and destination channel to be prefixed to the denomination. In these instances the sender chain is
acting as the "source zone". When the token is sent back to the chain it previously received from, the
prefix is removed. This is a backwards movement in the token's timeline and the sender chain is
acting as the "sink zone".
It is strongly recommended to read the full details of [ADR 001: Coin Source Tracing](/architecture/adr-001-coin-source-tracing) to understand the implications and context of the IBC token representations.
## UX suggestions for clients
For clients (wallets, exchanges, applications, block explorers, etc) that want to display the source of the token, it is recommended to use the following alternatives for each of the cases below:
### Direct connection
If the denomination trace contains a single identifier prefix pair (as in the example above), then
the easiest way to retrieve the chain and light client identifier is to map the trace information
directly. In summary, this requires querying the channel from the denomination trace identifiers,
and then the counterparty client state using the counterparty port and channel identifiers from the
retrieved channel.
A general pseudo algorithm would look like the following:
1. Query the full denomination trace.
2. Query the channel with the `portID/channelID` pair, which corresponds to the first destination of the
token.
3. Query the client state using the identifiers pair. Note that this query will return a `"Not
Found"` response if the current chain is not connected to this channel.
4. Retrieve the client identifier or chain identifier from the client state (eg: on
Tendermint clients) and store it locally.
Using the gRPC gateway client service the steps above would be, with a given IBC token `ibc/7F1D3FCF4AE79E1554D670D1AD949A9BA4E4A3C76C63093E17E446A46061A7A2` stored on `chainB`:
1. `GET /ibc/apps/transfer/v1/denom_traces/7F1D3FCF4AE79E1554D670D1AD949A9BA4E4A3C76C63093E17E446A46061A7A2` -> `{"path": "transfer/channelToA", "base_denom": "uatom"}`
2. `GET /ibc/apps/transfer/v1/channels/channelToA/ports/transfer/client_state` -> `{"client_id": "clientA", "chain-id": "chainA", ...}`
3. `GET /ibc/apps/transfer/v1/channels/channelToA/ports/transfer` -> `{"channel_id": "channelToA", "port_id": "transfer", "counterparty": {"channel_id": "channelToB", "port_id": "transfer"}, ...}`
4. `GET /ibc/apps/transfer/v1/channels/channelToB/ports/transfer/client_state` -> `{"client_id": "clientB", "chain-id": "chainB", ...}`
Then, the token transfer chain path for the `uatom` denomination would be: `chainA` -> `chainB`.
### Multiple hops
The multiple channel hops case applies when the token has passed through multiple chains between the original source and final destination chains.
The IBC protocol doesn't know the topology of the overall network (i.e connections between chains and identifier names between them). For this reason, in the multiple hops case, a particular chain in the timeline of the individual transfers can't query the chain and client identifiers of the other chains.
Take for example the following sequence of transfers `A -> B -> C` for an IBC token, with a final prefix path (trace info) of `transfer/channelChainC/transfer/channelChainB`. This means that even if chain `C` is directly connected to chain `A`, the port and channel identifiers that chain `B` uses to connect to chain `A` (eg: `transfer/channelChainA`) can be completely different from the ones that chain `C` uses to connect to chain `A` (eg: `transfer/channelToChainA`).
Thus, the solutions for clients that the IBC team recommends are the following:
- **Connect to all chains**: Connecting to all the chains in the timeline would allow clients to
perform the queries outlined in the [direct connection](#direct-connection) section to each
relevant chain. By repeatedly following the port and channel denomination trace transfer timeline,
clients should always be able to find all the relevant identifiers. This comes at the tradeoff
that the client must connect to nodes on each of the chains in order to perform the queries.
- **Relayer as a Service (RaaS)**: A longer term solution is to use/create a relayer service that
could map the denomination trace to the chain path timeline for each token (i.e `origin chain ->
chain #1 -> ... -> chain #(n-1) -> final chain`). These services could provide merkle proofs in
order to allow clients to optionally verify the path timeline correctness for themselves by
running light clients. If the proofs are not verified, the services should be treated as trusted third-party services. Additionally, clients would be advised to prefer RaaS providers that support the
largest number of connections between chains in the ecosystem. Unfortunately, none of the existing
public relayers (in [Golang](https://github.com/cosmos/relayer) and
[Rust](https://github.com/informalsystems/ibc-rs)) provides this service to clients.
:::tip
The only viable option for clients (at the time of writing) to trace tokens with multiple connection hops is to connect to all chains directly and perform the relevant queries on each of them in sequence.
:::
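Because every hop prefixes a destination `port/channel` pair onto the trace, walking the timeline amounts to reading the path two segments at a time, most recent hop first. The helper below is hypothetical (it belongs to no IBC library) and only illustrates the iteration that the per-chain queries above would follow:
```go
package main

import (
	"fmt"
	"strings"
)

// hop is a (port, channel) identifier pair taken from a denomination trace.
type hop struct {
	Port    string
	Channel string
}

// splitTrace reads the trace path two segments at a time, most recent hop first.
func splitTrace(path string) []hop {
	parts := strings.Split(path, "/")
	hops := make([]hop, 0, len(parts)/2)
	for i := 0; i+1 < len(parts); i += 2 {
		hops = append(hops, hop{Port: parts[i], Channel: parts[i+1]})
	}
	return hops
}

func main() {
	for _, h := range splitTrace("transfer/channelChainC/transfer/channelChainB") {
		fmt.Printf("query channel %s on port %s\n", h.Channel, h.Port)
	}
}
```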
## Forwarding
:::info
Token forwarding and unwinding is supported only on ICS20 v2 transfer channels.
:::
Forwarding allows tokens to be routed to a final destination through multiple (up to 8) intermediary
chains. With forwarding, it's also possible to unwind IBC vouchers to their native chain, and forward
them afterwards to another destination, all with just a single transfer transaction on the sending chain.
### Forward tokens
Native tokens or IBC vouchers on any chain can be forwarded through intermediary chains to reach their
final destination. For example, given the topology below, with 3 chains and a transfer channel between
chains A and B and between chains B and C:
![Light Mode Forwarding](./images/forwarding-3-chains-light.png#gh-light-mode-only)![Dark Mode Forwarding](./images/forwarding-3-chains-dark.png#gh-dark-mode-only)
Native tokens on chain `A` can be sent to chain `C` through chain `B`. The routing is specified by the
source port ID and channel ID of choice on every intermediary chain. In this example, there is only one
forwarding hop on chain `B` and the port ID, channel ID pair is `transfer`, `channelBToC`. Forwarding of
multi-denom collections of tokens is also allowed (i.e. forwarding of tokens of different denominations).
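As an illustration of the routing just described, the sketch below populates an ICS20 v2 `MsgTransfer` with a single forwarding hop on chain `B`. This is a hedged example: the field names are assumed from the ICS20 v2 proto definitions in recent ibc-go releases, addresses are placeholders, and token amounts and timeout fields are omitted.
```go
package main

import (
	"fmt"

	transfertypes "github.com/cosmos/ibc-go/v9/modules/apps/transfer/types"
)

func main() {
	// Route: chain A -> chain B -> chain C, with one forwarding hop on B.
	msg := transfertypes.MsgTransfer{
		SourcePort:    "transfer",
		SourceChannel: "channelAtoB", // first leg, chosen on the sending chain A
		Sender:        "cosmos1...",  // placeholder
		Receiver:      "cosmos1...",  // final receiver on chain C, placeholder
		Forwarding: &transfertypes.Forwarding{
			// Unwind: true would first return vouchers to their native chain
			// before following the hops (see the next section).
			Hops: []transfertypes.Hop{{PortId: "transfer", ChannelId: "channelBToC"}},
		},
	}
	fmt.Println(msg.String())
	// Token amounts and timeout fields are omitted here for brevity.
}
```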
### Unwind tokens
Taking again as an example the topology from the previous section, we assume that native tokens on chain `A`
have been transferred to chain `C`. The IBC vouchers on chain `C` have the denomination trace
`transfer/channelCtoB/transfer/channelBtoA`, and with forwarding it is possible to submit a transfer message
on chain `C` and automatically unwind the vouchers through chain `B` to chain `A`, so that the tokens recovered
on the origin chain regain their native denomination. In order to execute automatic unwinding, the transfer
module does not require extra user input: the unwind route is encoded in the denomination trace with the
pairs of destination port ID, channel ID that are added on every chain where the tokens are received.
Please note that unwinding of vouchers is only allowed when all transferred vouchers share the same denomination
trace (signifying coins that all originate from the same source). It is not possible to unwind vouchers of two different
IBC denominations, since they come from different source chains.
### Unwind tokens and then forward
Unwinding and forwarding can be used in combination, so that vouchers are first unwound to their origin chain
and then forwarded to a final destination. The same restriction as in the unwinding case applies: only vouchers
of a single IBC denomination can be used.
## Locked funds
In some [exceptional cases](/architecture/adr-026-ibc-client-recovery-mechanisms#exceptional-cases), a client state associated with a given channel cannot be updated. As a result, funds from fungible tokens in that channel are permanently locked and can no longer be transferred.
To mitigate this, a client update governance proposal can be submitted to update the frozen client
with a new valid header. Once the proposal passes the client state will be unfrozen and the funds
from the associated channels will then be unlocked. This mechanism only applies to clients that
allow updates via governance, such as Tendermint clients.
In addition to this, it's important to mention that a token must be sent back along the exact route
that it took originally in order to return it to its original form on the source chain (eg: the
Cosmos Hub for the `uatom`). Sending a token back to the same chain across a different channel will
**not** move the token back across its timeline. If a channel in the chain history closes before the
token can be sent back across that channel, then the token will not be returnable to its original
form.
## Security considerations
For safety, no other module must be capable of minting tokens with the `ibc/` prefix. The IBC
transfer module needs a subset of the denomination space that only it can create tokens in.
## Channel Closure
The IBC transfer module does not support channel closure.

3
go.mod
View File

@ -73,6 +73,7 @@ require (
github.com/ipfs/boxo v0.24.3
github.com/ipfs/go-cid v0.4.1
github.com/ipfs/kubo v0.32.1
github.com/jackc/pgx/v5 v5.7.1
github.com/joho/godotenv v1.5.1
github.com/labstack/echo-contrib v0.17.1
github.com/labstack/echo/v4 v4.12.0
@ -232,6 +233,8 @@ require (
github.com/ipld/go-codec-dagpb v1.6.0 // indirect
github.com/ipld/go-ipld-prime v0.21.0 // indirect
github.com/ipshipyard/p2p-forge v0.0.2 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
github.com/jbenet/goprocess v0.1.4 // indirect

8
go.sum
View File

@ -1766,6 +1766,14 @@ github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbk
github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0=
github.com/iris-contrib/i18n v0.0.0-20171121225848-987a633949d0/go.mod h1:pMCz62A0xJL6I+umB2YTlFRwWXaDFA0jy+5HzGiJjqI=
github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.7.1 h1:x7SYsPBYDkHDksogeSmZZ5xzThcTgRz++I5E+ePFUcs=
github.com/jackc/pgx/v5 v5.7.1/go.mod h1:e7O26IywZZ+naJtWWos6i6fvWK+29etgITqrqHLfoZA=
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA=

View File

@ -13,6 +13,4 @@ type Config struct {
SonrRpcUrl string `pkl:"sonrRpcUrl" json:"sonrRpcUrl,omitempty"`
SonrChainId string `pkl:"sonrChainId" json:"sonrChainId,omitempty"`
VaultSchema *Schema `pkl:"vaultSchema" json:"vaultSchema,omitempty"`
}

View File

@ -1,22 +0,0 @@
// Code generated from Pkl module `sonr.net.Motr`. DO NOT EDIT.
package motr
type Schema struct {
Version int `pkl:"version"`
Account string `pkl:"account" json:"account,omitempty"`
Asset string `pkl:"asset" json:"asset,omitempty"`
Chain string `pkl:"chain" json:"chain,omitempty"`
Credential string `pkl:"credential" json:"credential,omitempty"`
Jwk string `pkl:"jwk" json:"jwk,omitempty"`
Grant string `pkl:"grant" json:"grant,omitempty"`
Keyshare string `pkl:"keyshare" json:"keyshare,omitempty"`
Profile string `pkl:"profile" json:"profile,omitempty"`
}

View File

@ -6,6 +6,5 @@ import "github.com/apple/pkl-go/pkl"
func init() {
pkl.RegisterMapping("sonr.net.Motr", Motr{})
pkl.RegisterMapping("sonr.net.Motr#Config", Config{})
pkl.RegisterMapping("sonr.net.Motr#Schema", Schema{})
pkl.RegisterMapping("sonr.net.Motr#Environment", Environment{})
}

View File

@ -1,25 +0,0 @@
package database
import (
"context"
"database/sql"
_ "github.com/ncruces/go-sqlite3/driver"
_ "github.com/ncruces/go-sqlite3/embed"
config "github.com/onsonr/sonr/internal/config/hway"
"github.com/onsonr/sonr/internal/database/sink"
)
// NewDB initializes and returns a configured database connection
func NewDB(env config.Hway) (*sql.DB, error) {
db, err := sql.Open("sqlite3", ":memory:")
if err != nil {
return nil, err
}
// create tables
if _, err := db.ExecContext(context.Background(), sink.SchemaSQL); err != nil {
return nil, err
}
return db, nil
}

View File

@ -1,58 +0,0 @@
package database
import (
ctx "github.com/onsonr/sonr/internal/context"
"github.com/go-webauthn/webauthn/protocol"
"github.com/labstack/echo/v4"
"github.com/medama-io/go-useragent"
"github.com/onsonr/sonr/internal/database/repository"
"github.com/segmentio/ksuid"
)
func BaseSessionCreateParams(e echo.Context) repository.CreateSessionParams {
// f := rand.Intn(5) + 1
// l := rand.Intn(4) + 1
challenge, _ := protocol.CreateChallenge()
id := getOrCreateSessionID(e)
ua := useragent.NewParser()
s := ua.Parse(e.Request().UserAgent())
return repository.CreateSessionParams{
ID: id,
BrowserName: s.GetBrowser(),
BrowserVersion: s.GetMajorVersion(),
ClientIpaddr: e.RealIP(),
Platform: s.GetOS(),
IsMobile: s.IsMobile(),
IsTablet: s.IsTablet(),
IsDesktop: s.IsDesktop(),
IsBot: s.IsBot(),
IsTv: s.IsTV(),
// IsHumanFirst: int64(f),
// IsHumanLast: int64(l),
Challenge: challenge.String(),
}
}
func getOrCreateSessionID(c echo.Context) string {
if ok := ctx.CookieExists(c, ctx.SessionID); !ok {
sessionID := ksuid.New().String()
ctx.WriteCookie(c, ctx.SessionID, sessionID)
return sessionID
}
sessionID, err := ctx.ReadCookie(c, ctx.SessionID)
if err != nil {
sessionID = ksuid.New().String()
ctx.WriteCookie(c, ctx.SessionID, sessionID)
}
return sessionID
}
func boolToInt64(b bool) int64 {
if b {
return 1
}
return 0
}

View File

@ -1,9 +0,0 @@
version: "2"
sql:
- engine: "sqlite"
queries: "./sink/query.sql"
schema: "./sink/schema.sql"
gen:
go:
package: "repository"
out: "repository"

View File

@ -4,8 +4,7 @@ import (
"encoding/json"
"github.com/ipfs/boxo/files"
config "github.com/onsonr/sonr/internal/config/motr"
"github.com/onsonr/sonr/internal/models"
"github.com/onsonr/sonr/internal/config/motr"
)
const SchemaVersion = 1
@ -18,8 +17,8 @@ const (
)
// spawnVaultDirectory creates a new directory with the default files
func NewVaultFS(cfg *config.Config) (files.Directory, error) {
manifestBz, err := models.NewWebManifest()
func NewVaultFS(cfg *motr.Config) (files.Directory, error) {
manifestBz, err := NewWebManifest()
if err != nil {
return nil, err
}
@ -37,28 +36,13 @@ func NewVaultFS(cfg *config.Config) (files.Directory, error) {
}
// NewVaultConfig returns the default vault config
func NewVaultConfig(addr string, ucanCID string) *config.Config {
return &config.Config{
func NewVaultConfig(addr string, ucanCID string) *motr.Config {
return &motr.Config{
MotrToken: ucanCID,
MotrAddress: addr,
IpfsGatewayUrl: "http://localhost:80",
SonrApiUrl: "http://localhost:1317",
SonrRpcUrl: "http://localhost:26657",
SonrChainId: "sonr-testnet-1",
VaultSchema: DefaultSchema(),
}
}
// DefaultSchema returns the default schema
func DefaultSchema() *config.Schema {
return &config.Schema{
Version: SchemaVersion,
Account: getSchema(&models.Account{}),
Asset: getSchema(&models.Asset{}),
Chain: getSchema(&models.Chain{}),
Credential: getSchema(&models.Credential{}),
Grant: getSchema(&models.Grant{}),
Keyshare: getSchema(&models.Keyshare{}),
Profile: getSchema(&models.Profile{}),
}
}

View File

@ -1,4 +1,4 @@
package models
package embed
import "encoding/json"

View File

@ -1,20 +0,0 @@
// Code generated from Pkl module `sonr.orm.Models`. DO NOT EDIT.
package models
type Account struct {
Id string `pkl:"id" json:"id,omitempty" query:"id"`
Name string `pkl:"name" json:"name,omitempty"`
Address any `pkl:"address" json:"address,omitempty"`
PublicKey string `pkl:"publicKey" json:"publicKey,omitempty"`
ChainCode uint `pkl:"chainCode" json:"chainCode,omitempty"`
Index int `pkl:"index" json:"index,omitempty"`
Controller string `pkl:"controller" json:"controller,omitempty"`
CreatedAt *string `pkl:"createdAt" json:"createdAt,omitempty"`
}

View File

@ -1,16 +0,0 @@
// Code generated from Pkl module `sonr.orm.Models`. DO NOT EDIT.
package models
type Asset struct {
Id string `pkl:"id" json:"id,omitempty" query:"id"`
Name string `pkl:"name" json:"name,omitempty"`
Symbol string `pkl:"symbol" json:"symbol,omitempty"`
Decimals int `pkl:"decimals" json:"decimals,omitempty"`
ChainCode uint `pkl:"chainCode" json:"chainCode,omitempty"`
CreatedAt *string `pkl:"createdAt" json:"createdAt,omitempty"`
}

View File

@ -1,14 +0,0 @@
// Code generated from Pkl module `sonr.orm.Models`. DO NOT EDIT.
package models
type Chain struct {
Id string `pkl:"id" json:"id,omitempty" query:"id"`
Name string `pkl:"name" json:"name,omitempty"`
NetworkId string `pkl:"networkId" json:"networkId,omitempty"`
ChainCode uint `pkl:"chainCode" json:"chainCode,omitempty"`
CreatedAt *string `pkl:"createdAt" json:"createdAt,omitempty"`
}

View File

@ -1,40 +0,0 @@
// Code generated from Pkl module `sonr.orm.Models`. DO NOT EDIT.
package models
type Credential struct {
Id string `pkl:"id" json:"id,omitempty" query:"id"`
Subject string `pkl:"subject" json:"subject,omitempty"`
Controller string `pkl:"controller" json:"controller,omitempty"`
AttestationType string `pkl:"attestationType" json:"attestationType,omitempty"`
Origin string `pkl:"origin" json:"origin,omitempty"`
Label *string `pkl:"label" json:"label,omitempty"`
DeviceId *string `pkl:"deviceId" json:"deviceId,omitempty"`
CredentialId string `pkl:"credentialId" json:"credentialId,omitempty"`
PublicKey string `pkl:"publicKey" json:"publicKey,omitempty"`
Transport []string `pkl:"transport" json:"transport,omitempty"`
SignCount uint `pkl:"signCount" json:"signCount,omitempty"`
UserPresent bool `pkl:"userPresent" json:"userPresent,omitempty"`
UserVerified bool `pkl:"userVerified" json:"userVerified,omitempty"`
BackupEligible bool `pkl:"backupEligible" json:"backupEligible,omitempty"`
BackupState bool `pkl:"backupState" json:"backupState,omitempty"`
CloneWarning bool `pkl:"cloneWarning" json:"cloneWarning,omitempty"`
CreatedAt *string `pkl:"createdAt" json:"createdAt,omitempty"`
UpdatedAt *string `pkl:"updatedAt" json:"updatedAt,omitempty"`
}

View File

@ -1,28 +0,0 @@
// Code generated from Pkl module `sonr.orm.Models`. DO NOT EDIT.
package models
import (
"github.com/onsonr/sonr/internal/models/keyalgorithm"
"github.com/onsonr/sonr/internal/models/keycurve"
"github.com/onsonr/sonr/internal/models/keyencoding"
"github.com/onsonr/sonr/internal/models/keyrole"
"github.com/onsonr/sonr/internal/models/keytype"
)
type DID struct {
Id string `pkl:"id" json:"id,omitempty" query:"id"`
Role keyrole.KeyRole `pkl:"role"`
Algorithm keyalgorithm.KeyAlgorithm `pkl:"algorithm"`
Encoding keyencoding.KeyEncoding `pkl:"encoding"`
Curve keycurve.KeyCurve `pkl:"curve"`
KeyType keytype.KeyType `pkl:"key_type"`
Raw string `pkl:"raw"`
Jwk *JWK `pkl:"jwk"`
}

View File

@ -1,20 +0,0 @@
// Code generated from Pkl module `sonr.orm.Models`. DO NOT EDIT.
package models
type Grant struct {
Id uint `pkl:"id" json:"id,omitempty" query:"id"`
Subject string `pkl:"subject" json:"subject,omitempty"`
Controller string `pkl:"controller" json:"controller,omitempty"`
Origin string `pkl:"origin" json:"origin,omitempty"`
Token string `pkl:"token" json:"token,omitempty"`
Scopes []string `pkl:"scopes" json:"scopes,omitempty"`
CreatedAt *string `pkl:"createdAt" json:"createdAt,omitempty"`
UpdatedAt *string `pkl:"updatedAt" json:"updatedAt,omitempty"`
}

View File

@ -1,16 +0,0 @@
// Code generated from Pkl module `sonr.orm.Models`. DO NOT EDIT.
package models
type JWK struct {
Kty string `pkl:"kty" json:"kty,omitempty"`
Crv string `pkl:"crv" json:"crv,omitempty"`
X string `pkl:"x" json:"x,omitempty"`
Y string `pkl:"y" json:"y,omitempty"`
N string `pkl:"n" json:"n,omitempty"`
E string `pkl:"e" json:"e,omitempty"`
}

View File

@ -1,14 +0,0 @@
// Code generated from Pkl module `sonr.orm.Models`. DO NOT EDIT.
package models
type Keyshare struct {
Id string `pkl:"id" json:"id,omitempty" query:"id"`
Data string `pkl:"data" json:"data,omitempty"`
Role int `pkl:"role" json:"role,omitempty"`
CreatedAt *string `pkl:"createdAt" json:"createdAt,omitempty"`
LastRefreshed *string `pkl:"lastRefreshed" json:"lastRefreshed,omitempty"`
}

View File

@ -1,39 +0,0 @@
// Code generated from Pkl module `sonr.orm.Models`. DO NOT EDIT.
package models
import (
"context"
"github.com/apple/pkl-go/pkl"
)
type Models struct {
DbName string `pkl:"db_name"`
DbVersion int `pkl:"db_version"`
}
// LoadFromPath loads the pkl module at the given path and evaluates it into a Models
func LoadFromPath(ctx context.Context, path string) (ret *Models, err error) {
evaluator, err := pkl.NewEvaluator(ctx, pkl.PreconfiguredOptions)
if err != nil {
return nil, err
}
defer func() {
cerr := evaluator.Close()
if err == nil {
err = cerr
}
}()
ret, err = Load(ctx, evaluator, pkl.FileSource(path))
return ret, err
}
// Load loads the pkl module at the given source and evaluates it with the given evaluator into a Models
func Load(ctx context.Context, evaluator pkl.Evaluator, source *pkl.ModuleSource) (*Models, error) {
var ret Models
if err := evaluator.EvaluateModule(ctx, source, &ret); err != nil {
return nil, err
}
return &ret, nil
}

View File

@ -1,20 +0,0 @@
// Code generated from Pkl module `sonr.orm.Models`. DO NOT EDIT.
package models
type Profile struct {
Id string `pkl:"id" json:"id,omitempty" query:"id"`
Subject string `pkl:"subject" json:"subject,omitempty"`
Controller string `pkl:"controller" json:"controller,omitempty"`
OriginUri *string `pkl:"originUri" json:"originUri,omitempty"`
PublicMetadata *string `pkl:"publicMetadata" json:"publicMetadata,omitempty"`
PrivateMetadata *string `pkl:"privateMetadata" json:"privateMetadata,omitempty"`
CreatedAt *string `pkl:"createdAt" json:"createdAt,omitempty"`
UpdatedAt *string `pkl:"updatedAt" json:"updatedAt,omitempty"`
}

View File

@ -1,46 +0,0 @@
// Code generated from Pkl module `sonr.orm.Models`. DO NOT EDIT.
package assettype
import (
"encoding"
"fmt"
)
type AssetType string
const (
Native AssetType = "native"
Wrapped AssetType = "wrapped"
Staking AssetType = "staking"
Pool AssetType = "pool"
Ibc AssetType = "ibc"
Cw20 AssetType = "cw20"
)
// String returns the string representation of AssetType
func (rcv AssetType) String() string {
return string(rcv)
}
var _ encoding.BinaryUnmarshaler = new(AssetType)
// UnmarshalBinary implements encoding.BinaryUnmarshaler for AssetType.
func (rcv *AssetType) UnmarshalBinary(data []byte) error {
switch str := string(data); str {
case "native":
*rcv = Native
case "wrapped":
*rcv = Wrapped
case "staking":
*rcv = Staking
case "pool":
*rcv = Pool
case "ibc":
*rcv = Ibc
case "cw20":
*rcv = Cw20
default:
return fmt.Errorf(`illegal: "%s" is not a valid AssetType`, str)
}
return nil
}

View File

@ -1,52 +0,0 @@
// Code generated from Pkl module `sonr.orm.Models`. DO NOT EDIT.
package didmethod
import (
"encoding"
"fmt"
)
type DIDMethod string
const (
Ipfs DIDMethod = "ipfs"
Sonr DIDMethod = "sonr"
Bitcoin DIDMethod = "bitcoin"
Ethereum DIDMethod = "ethereum"
Ibc DIDMethod = "ibc"
Webauthn DIDMethod = "webauthn"
Dwn DIDMethod = "dwn"
Service DIDMethod = "service"
)
// String returns the string representation of DIDMethod
func (rcv DIDMethod) String() string {
return string(rcv)
}
var _ encoding.BinaryUnmarshaler = new(DIDMethod)
// UnmarshalBinary implements encoding.BinaryUnmarshaler for DIDMethod.
func (rcv *DIDMethod) UnmarshalBinary(data []byte) error {
switch str := string(data); str {
case "ipfs":
*rcv = Ipfs
case "sonr":
*rcv = Sonr
case "bitcoin":
*rcv = Bitcoin
case "ethereum":
*rcv = Ethereum
case "ibc":
*rcv = Ibc
case "webauthn":
*rcv = Webauthn
case "dwn":
*rcv = Dwn
case "service":
*rcv = Service
default:
return fmt.Errorf(`illegal: "%s" is not a valid DIDMethod`, str)
}
return nil
}

View File

@ -0,0 +1,32 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.27.0
package hwayorm
import (
"context"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgconn"
)
type DBTX interface {
Exec(context.Context, string, ...interface{}) (pgconn.CommandTag, error)
Query(context.Context, string, ...interface{}) (pgx.Rows, error)
QueryRow(context.Context, string, ...interface{}) pgx.Row
}
func New(db DBTX) *Queries {
return &Queries{db: db}
}
type Queries struct {
db DBTX
}
func (q *Queries) WithTx(tx pgx.Tx) *Queries {
return &Queries{
db: tx,
}
}
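A brief usage sketch for this generated package: a `*pgxpool.Pool` from `pgx/v5` satisfies the `DBTX` interface above, so it can be passed directly to `New`. The DSN, handle, and import path are assumptions for illustration.
```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/jackc/pgx/v5/pgxpool"

	"github.com/onsonr/sonr/internal/models/hwayorm" // import path assumed
)

func main() {
	ctx := context.Background()
	// placeholder DSN; point this at a real PostgreSQL instance
	pool, err := pgxpool.New(ctx, "postgres://hway:hway@localhost:5432/hway")
	if err != nil {
		log.Fatal(err)
	}
	defer pool.Close()

	q := hwayorm.New(pool) // *pgxpool.Pool implements Exec, Query, and QueryRow
	profile, err := q.GetProfileByHandle(ctx, "alice")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(profile.Address)
}
```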

View File

@ -0,0 +1,98 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.27.0
package hwayorm
import (
"github.com/jackc/pgx/v5/pgtype"
)
type Account struct {
ID string
CreatedAt pgtype.Timestamptz
UpdatedAt pgtype.Timestamptz
DeletedAt pgtype.Timestamptz
Number int64
Sequence int32
Address string
PublicKey string
ChainID string
Controller string
IsSubsidiary bool
IsValidator bool
IsDelegator bool
IsAccountable bool
}
type Asset struct {
ID string
CreatedAt pgtype.Timestamptz
UpdatedAt pgtype.Timestamptz
DeletedAt pgtype.Timestamptz
Name string
Symbol string
Decimals int32
ChainID string
Channel string
AssetType string
CoingeckoID pgtype.Text
}
type Credential struct {
ID string
CreatedAt pgtype.Timestamptz
UpdatedAt pgtype.Timestamptz
DeletedAt pgtype.Timestamptz
Handle string
CredentialID string
AuthenticatorAttachment string
Origin string
Type string
Transports string
}
type Profile struct {
ID string
CreatedAt pgtype.Timestamptz
UpdatedAt pgtype.Timestamptz
DeletedAt pgtype.Timestamptz
Address string
Handle string
Origin string
Name string
}
type Session struct {
ID string
CreatedAt pgtype.Timestamptz
UpdatedAt pgtype.Timestamptz
DeletedAt pgtype.Timestamptz
BrowserName string
BrowserVersion string
ClientIpaddr string
Platform string
IsDesktop bool
IsMobile bool
IsTablet bool
IsTv bool
IsBot bool
Challenge string
IsHumanFirst bool
IsHumanLast bool
ProfileID string
}
type Vault struct {
ID int64
CreatedAt pgtype.Timestamptz
UpdatedAt pgtype.Timestamptz
DeletedAt pgtype.Timestamptz
Handle string
Origin string
Address string
Cid string
Config []byte
SessionID int64
RedirectUri string
}

View File

@ -0,0 +1,578 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.27.0
// source: query.sql
package hwayorm
import (
"context"
)
const checkHandleExists = `-- name: CheckHandleExists :one
SELECT COUNT(*) > 0 as handle_exists FROM profiles
WHERE handle = $1
AND deleted_at IS NULL
`
func (q *Queries) CheckHandleExists(ctx context.Context, handle string) (bool, error) {
row := q.db.QueryRow(ctx, checkHandleExists, handle)
var handle_exists bool
err := row.Scan(&handle_exists)
return handle_exists, err
}
const createSession = `-- name: CreateSession :one
INSERT INTO sessions (
id,
browser_name,
browser_version,
client_ipaddr,
platform,
is_desktop,
is_mobile,
is_tablet,
is_tv,
is_bot,
challenge,
is_human_first,
is_human_last,
profile_id
) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14)
RETURNING id, created_at, updated_at, deleted_at, browser_name, browser_version, client_ipaddr, platform, is_desktop, is_mobile, is_tablet, is_tv, is_bot, challenge, is_human_first, is_human_last, profile_id
`
type CreateSessionParams struct {
ID string
BrowserName string
BrowserVersion string
ClientIpaddr string
Platform string
IsDesktop bool
IsMobile bool
IsTablet bool
IsTv bool
IsBot bool
Challenge string
IsHumanFirst bool
IsHumanLast bool
ProfileID string
}
func (q *Queries) CreateSession(ctx context.Context, arg CreateSessionParams) (Session, error) {
row := q.db.QueryRow(ctx, createSession,
arg.ID,
arg.BrowserName,
arg.BrowserVersion,
arg.ClientIpaddr,
arg.Platform,
arg.IsDesktop,
arg.IsMobile,
arg.IsTablet,
arg.IsTv,
arg.IsBot,
arg.Challenge,
arg.IsHumanFirst,
arg.IsHumanLast,
arg.ProfileID,
)
var i Session
err := row.Scan(
&i.ID,
&i.CreatedAt,
&i.UpdatedAt,
&i.DeletedAt,
&i.BrowserName,
&i.BrowserVersion,
&i.ClientIpaddr,
&i.Platform,
&i.IsDesktop,
&i.IsMobile,
&i.IsTablet,
&i.IsTv,
&i.IsBot,
&i.Challenge,
&i.IsHumanFirst,
&i.IsHumanLast,
&i.ProfileID,
)
return i, err
}
const getChallengeBySessionID = `-- name: GetChallengeBySessionID :one
SELECT challenge FROM sessions
WHERE id = $1 AND deleted_at IS NULL
LIMIT 1
`
func (q *Queries) GetChallengeBySessionID(ctx context.Context, id string) (string, error) {
row := q.db.QueryRow(ctx, getChallengeBySessionID, id)
var challenge string
err := row.Scan(&challenge)
return challenge, err
}
const getCredentialByID = `-- name: GetCredentialByID :one
SELECT id, created_at, updated_at, deleted_at, handle, credential_id, authenticator_attachment, origin, type, transports FROM credentials
WHERE credential_id = $1
AND deleted_at IS NULL
LIMIT 1
`
func (q *Queries) GetCredentialByID(ctx context.Context, credentialID string) (Credential, error) {
row := q.db.QueryRow(ctx, getCredentialByID, credentialID)
var i Credential
err := row.Scan(
&i.ID,
&i.CreatedAt,
&i.UpdatedAt,
&i.DeletedAt,
&i.Handle,
&i.CredentialID,
&i.AuthenticatorAttachment,
&i.Origin,
&i.Type,
&i.Transports,
)
return i, err
}
const getCredentialsByHandle = `-- name: GetCredentialsByHandle :many
SELECT id, created_at, updated_at, deleted_at, handle, credential_id, authenticator_attachment, origin, type, transports FROM credentials
WHERE handle = $1
AND deleted_at IS NULL
`
func (q *Queries) GetCredentialsByHandle(ctx context.Context, handle string) ([]Credential, error) {
rows, err := q.db.Query(ctx, getCredentialsByHandle, handle)
if err != nil {
return nil, err
}
defer rows.Close()
var items []Credential
for rows.Next() {
var i Credential
if err := rows.Scan(
&i.ID,
&i.CreatedAt,
&i.UpdatedAt,
&i.DeletedAt,
&i.Handle,
&i.CredentialID,
&i.AuthenticatorAttachment,
&i.Origin,
&i.Type,
&i.Transports,
); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const getHumanVerificationNumbers = `-- name: GetHumanVerificationNumbers :one
SELECT is_human_first, is_human_last FROM sessions
WHERE id = $1 AND deleted_at IS NULL
LIMIT 1
`
type GetHumanVerificationNumbersRow struct {
IsHumanFirst bool
IsHumanLast bool
}
func (q *Queries) GetHumanVerificationNumbers(ctx context.Context, id string) (GetHumanVerificationNumbersRow, error) {
row := q.db.QueryRow(ctx, getHumanVerificationNumbers, id)
var i GetHumanVerificationNumbersRow
err := row.Scan(&i.IsHumanFirst, &i.IsHumanLast)
return i, err
}
const getProfileByAddress = `-- name: GetProfileByAddress :one
SELECT id, created_at, updated_at, deleted_at, address, handle, origin, name FROM profiles
WHERE address = $1 AND deleted_at IS NULL
LIMIT 1
`
func (q *Queries) GetProfileByAddress(ctx context.Context, address string) (Profile, error) {
row := q.db.QueryRow(ctx, getProfileByAddress, address)
var i Profile
err := row.Scan(
&i.ID,
&i.CreatedAt,
&i.UpdatedAt,
&i.DeletedAt,
&i.Address,
&i.Handle,
&i.Origin,
&i.Name,
)
return i, err
}
const getProfileByHandle = `-- name: GetProfileByHandle :one
SELECT id, created_at, updated_at, deleted_at, address, handle, origin, name FROM profiles
WHERE handle = $1
AND deleted_at IS NULL
LIMIT 1
`
func (q *Queries) GetProfileByHandle(ctx context.Context, handle string) (Profile, error) {
row := q.db.QueryRow(ctx, getProfileByHandle, handle)
var i Profile
err := row.Scan(
&i.ID,
&i.CreatedAt,
&i.UpdatedAt,
&i.DeletedAt,
&i.Address,
&i.Handle,
&i.Origin,
&i.Name,
)
return i, err
}
const getProfileByID = `-- name: GetProfileByID :one
SELECT id, created_at, updated_at, deleted_at, address, handle, origin, name FROM profiles
WHERE id = $1 AND deleted_at IS NULL
LIMIT 1
`
func (q *Queries) GetProfileByID(ctx context.Context, id string) (Profile, error) {
row := q.db.QueryRow(ctx, getProfileByID, id)
var i Profile
err := row.Scan(
&i.ID,
&i.CreatedAt,
&i.UpdatedAt,
&i.DeletedAt,
&i.Address,
&i.Handle,
&i.Origin,
&i.Name,
)
return i, err
}
const getSessionByClientIP = `-- name: GetSessionByClientIP :one
SELECT id, created_at, updated_at, deleted_at, browser_name, browser_version, client_ipaddr, platform, is_desktop, is_mobile, is_tablet, is_tv, is_bot, challenge, is_human_first, is_human_last, profile_id FROM sessions
WHERE client_ipaddr = $1 AND deleted_at IS NULL
LIMIT 1
`
func (q *Queries) GetSessionByClientIP(ctx context.Context, clientIpaddr string) (Session, error) {
row := q.db.QueryRow(ctx, getSessionByClientIP, clientIpaddr)
var i Session
err := row.Scan(
&i.ID,
&i.CreatedAt,
&i.UpdatedAt,
&i.DeletedAt,
&i.BrowserName,
&i.BrowserVersion,
&i.ClientIpaddr,
&i.Platform,
&i.IsDesktop,
&i.IsMobile,
&i.IsTablet,
&i.IsTv,
&i.IsBot,
&i.Challenge,
&i.IsHumanFirst,
&i.IsHumanLast,
&i.ProfileID,
)
return i, err
}
const getSessionByID = `-- name: GetSessionByID :one
SELECT id, created_at, updated_at, deleted_at, browser_name, browser_version, client_ipaddr, platform, is_desktop, is_mobile, is_tablet, is_tv, is_bot, challenge, is_human_first, is_human_last, profile_id FROM sessions
WHERE id = $1 AND deleted_at IS NULL
LIMIT 1
`
func (q *Queries) GetSessionByID(ctx context.Context, id string) (Session, error) {
row := q.db.QueryRow(ctx, getSessionByID, id)
var i Session
err := row.Scan(
&i.ID,
&i.CreatedAt,
&i.UpdatedAt,
&i.DeletedAt,
&i.BrowserName,
&i.BrowserVersion,
&i.ClientIpaddr,
&i.Platform,
&i.IsDesktop,
&i.IsMobile,
&i.IsTablet,
&i.IsTv,
&i.IsBot,
&i.Challenge,
&i.IsHumanFirst,
&i.IsHumanLast,
&i.ProfileID,
)
return i, err
}
const getVaultConfigByCID = `-- name: GetVaultConfigByCID :one
SELECT id, created_at, updated_at, deleted_at, handle, origin, address, cid, config, session_id, redirect_uri FROM vaults
WHERE cid = $1
AND deleted_at IS NULL
LIMIT 1
`
func (q *Queries) GetVaultConfigByCID(ctx context.Context, cid string) (Vault, error) {
row := q.db.QueryRow(ctx, getVaultConfigByCID, cid)
var i Vault
err := row.Scan(
&i.ID,
&i.CreatedAt,
&i.UpdatedAt,
&i.DeletedAt,
&i.Handle,
&i.Origin,
&i.Address,
&i.Cid,
&i.Config,
&i.SessionID,
&i.RedirectUri,
)
return i, err
}
const getVaultRedirectURIBySessionID = `-- name: GetVaultRedirectURIBySessionID :one
SELECT redirect_uri FROM vaults
WHERE session_id = $1
AND deleted_at IS NULL
LIMIT 1
`
func (q *Queries) GetVaultRedirectURIBySessionID(ctx context.Context, sessionID int64) (string, error) {
row := q.db.QueryRow(ctx, getVaultRedirectURIBySessionID, sessionID)
var redirect_uri string
err := row.Scan(&redirect_uri)
return redirect_uri, err
}
const insertCredential = `-- name: InsertCredential :one
INSERT INTO credentials (
handle,
credential_id,
origin,
type,
transports
) VALUES ($1, $2, $3, $4, $5)
RETURNING id, created_at, updated_at, deleted_at, handle, credential_id, authenticator_attachment, origin, type, transports
`
type InsertCredentialParams struct {
Handle string
CredentialID string
Origin string
Type string
Transports string
}
func (q *Queries) InsertCredential(ctx context.Context, arg InsertCredentialParams) (Credential, error) {
row := q.db.QueryRow(ctx, insertCredential,
arg.Handle,
arg.CredentialID,
arg.Origin,
arg.Type,
arg.Transports,
)
var i Credential
err := row.Scan(
&i.ID,
&i.CreatedAt,
&i.UpdatedAt,
&i.DeletedAt,
&i.Handle,
&i.CredentialID,
&i.AuthenticatorAttachment,
&i.Origin,
&i.Type,
&i.Transports,
)
return i, err
}
const insertProfile = `-- name: InsertProfile :one
INSERT INTO profiles (
address,
handle,
origin,
name
) VALUES ($1, $2, $3, $4)
RETURNING id, created_at, updated_at, deleted_at, address, handle, origin, name
`
type InsertProfileParams struct {
Address string
Handle string
Origin string
Name string
}
func (q *Queries) InsertProfile(ctx context.Context, arg InsertProfileParams) (Profile, error) {
row := q.db.QueryRow(ctx, insertProfile,
arg.Address,
arg.Handle,
arg.Origin,
arg.Name,
)
var i Profile
err := row.Scan(
&i.ID,
&i.CreatedAt,
&i.UpdatedAt,
&i.DeletedAt,
&i.Address,
&i.Handle,
&i.Origin,
&i.Name,
)
return i, err
}
const softDeleteCredential = `-- name: SoftDeleteCredential :exec
UPDATE credentials
SET deleted_at = CURRENT_TIMESTAMP
WHERE credential_id = $1
`
func (q *Queries) SoftDeleteCredential(ctx context.Context, credentialID string) error {
_, err := q.db.Exec(ctx, softDeleteCredential, credentialID)
return err
}
const softDeleteProfile = `-- name: SoftDeleteProfile :exec
UPDATE profiles
SET deleted_at = CURRENT_TIMESTAMP
WHERE address = $1
`
func (q *Queries) SoftDeleteProfile(ctx context.Context, address string) error {
_, err := q.db.Exec(ctx, softDeleteProfile, address)
return err
}
const updateProfile = `-- name: UpdateProfile :one
UPDATE profiles
SET
name = $1,
handle = $2,
updated_at = CURRENT_TIMESTAMP
WHERE address = $3
AND deleted_at IS NULL
RETURNING id, created_at, updated_at, deleted_at, address, handle, origin, name
`
type UpdateProfileParams struct {
Name string
Handle string
Address string
}
func (q *Queries) UpdateProfile(ctx context.Context, arg UpdateProfileParams) (Profile, error) {
row := q.db.QueryRow(ctx, updateProfile, arg.Name, arg.Handle, arg.Address)
var i Profile
err := row.Scan(
&i.ID,
&i.CreatedAt,
&i.UpdatedAt,
&i.DeletedAt,
&i.Address,
&i.Handle,
&i.Origin,
&i.Name,
)
return i, err
}
const updateSessionHumanVerification = `-- name: UpdateSessionHumanVerification :one
UPDATE sessions
SET
is_human_first = $1,
is_human_last = $2,
updated_at = CURRENT_TIMESTAMP
WHERE id = $3
RETURNING id, created_at, updated_at, deleted_at, browser_name, browser_version, client_ipaddr, platform, is_desktop, is_mobile, is_tablet, is_tv, is_bot, challenge, is_human_first, is_human_last, profile_id
`
type UpdateSessionHumanVerificationParams struct {
IsHumanFirst bool
IsHumanLast bool
ID string
}
func (q *Queries) UpdateSessionHumanVerification(ctx context.Context, arg UpdateSessionHumanVerificationParams) (Session, error) {
row := q.db.QueryRow(ctx, updateSessionHumanVerification, arg.IsHumanFirst, arg.IsHumanLast, arg.ID)
var i Session
err := row.Scan(
&i.ID,
&i.CreatedAt,
&i.UpdatedAt,
&i.DeletedAt,
&i.BrowserName,
&i.BrowserVersion,
&i.ClientIpaddr,
&i.Platform,
&i.IsDesktop,
&i.IsMobile,
&i.IsTablet,
&i.IsTv,
&i.IsBot,
&i.Challenge,
&i.IsHumanFirst,
&i.IsHumanLast,
&i.ProfileID,
)
return i, err
}
const updateSessionWithProfileID = `-- name: UpdateSessionWithProfileID :one
UPDATE sessions
SET
profile_id = $1,
updated_at = CURRENT_TIMESTAMP
WHERE id = $2
RETURNING id, created_at, updated_at, deleted_at, browser_name, browser_version, client_ipaddr, platform, is_desktop, is_mobile, is_tablet, is_tv, is_bot, challenge, is_human_first, is_human_last, profile_id
`
type UpdateSessionWithProfileIDParams struct {
ProfileID string
ID string
}
func (q *Queries) UpdateSessionWithProfileID(ctx context.Context, arg UpdateSessionWithProfileIDParams) (Session, error) {
row := q.db.QueryRow(ctx, updateSessionWithProfileID, arg.ProfileID, arg.ID)
var i Session
err := row.Scan(
&i.ID,
&i.CreatedAt,
&i.UpdatedAt,
&i.DeletedAt,
&i.BrowserName,
&i.BrowserVersion,
&i.ClientIpaddr,
&i.Platform,
&i.IsDesktop,
&i.IsMobile,
&i.IsTablet,
&i.IsTv,
&i.IsBot,
&i.Challenge,
&i.IsHumanFirst,
&i.IsHumanLast,
&i.ProfileID,
)
return i, err
}
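
For reference, a minimal sketch of driving these generated methods from a caller's perspective; the address, handle, and session KSUID below are placeholders, not part of the commit:

func registerProfile(ctx context.Context, q *hwayorm.Queries) error {
	profile, err := q.InsertProfile(ctx, hwayorm.InsertProfileParams{
		Address: "sonr1exampleaddr",
		Handle:  "digitalgold",
		Origin:  "sonr.id",
		Name:    "Digital Gold",
	})
	if err != nil {
		return err
	}
	// Attach the new profile to an existing session row by its ID.
	_, err = q.UpdateSessionWithProfileID(ctx, hwayorm.UpdateSessionWithProfileIDParams{
		ProfileID: profile.ID,
		ID:        "2ExampleKSUID",
	})
	return err
}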


@ -2,7 +2,7 @@
// versions:
// sqlc v1.27.0
package repository
package motrorm
import (
"context"


@ -2,7 +2,7 @@
// versions:
// sqlc v1.27.0
package repository
package motrorm
import (
"database/sql"
@ -41,7 +41,7 @@ type Asset struct {
}
type Credential struct {
ID int64
ID string
CreatedAt time.Time
UpdatedAt time.Time
DeletedAt sql.NullTime
@ -54,7 +54,7 @@ type Credential struct {
}
type Profile struct {
ID int64
ID string
CreatedAt time.Time
UpdatedAt time.Time
DeletedAt sql.NullTime
@ -85,7 +85,7 @@ type Session struct {
}
type Vault struct {
ID int64
ID string
CreatedAt time.Time
UpdatedAt time.Time
DeletedAt sql.NullTime


@ -3,7 +3,7 @@
// sqlc v1.27.0
// source: query.sql
package repository
package motrorm
import (
"context"
@ -246,7 +246,7 @@ WHERE id = ? AND deleted_at IS NULL
LIMIT 1
`
func (q *Queries) GetProfileByID(ctx context.Context, id int64) (Profile, error) {
func (q *Queries) GetProfileByID(ctx context.Context, id string) (Profile, error) {
row := q.db.QueryRowContext(ctx, getProfileByID, id)
var i Profile
err := row.Scan(


@ -1,17 +0,0 @@
// Code generated from Pkl module `sonr.orm.Models`. DO NOT EDIT.
package models
import "github.com/apple/pkl-go/pkl"
func init() {
pkl.RegisterMapping("sonr.orm.Models", Models{})
pkl.RegisterMapping("sonr.orm.Models#Account", Account{})
pkl.RegisterMapping("sonr.orm.Models#Asset", Asset{})
pkl.RegisterMapping("sonr.orm.Models#Chain", Chain{})
pkl.RegisterMapping("sonr.orm.Models#Credential", Credential{})
pkl.RegisterMapping("sonr.orm.Models#DID", DID{})
pkl.RegisterMapping("sonr.orm.Models#JWK", JWK{})
pkl.RegisterMapping("sonr.orm.Models#Grant", Grant{})
pkl.RegisterMapping("sonr.orm.Models#Keyshare", Keyshare{})
pkl.RegisterMapping("sonr.orm.Models#Profile", Profile{})
}


@ -1,46 +0,0 @@
// Code generated from Pkl module `sonr.orm.Models`. DO NOT EDIT.
package keyalgorithm
import (
"encoding"
"fmt"
)
type KeyAlgorithm string
const (
Es256 KeyAlgorithm = "es256"
Es384 KeyAlgorithm = "es384"
Es512 KeyAlgorithm = "es512"
Eddsa KeyAlgorithm = "eddsa"
Es256k KeyAlgorithm = "es256k"
Ecdsa KeyAlgorithm = "ecdsa"
)
// String returns the string representation of KeyAlgorithm
func (rcv KeyAlgorithm) String() string {
return string(rcv)
}
var _ encoding.BinaryUnmarshaler = new(KeyAlgorithm)
// UnmarshalBinary implements encoding.BinaryUnmarshaler for KeyAlgorithm.
func (rcv *KeyAlgorithm) UnmarshalBinary(data []byte) error {
switch str := string(data); str {
case "es256":
*rcv = Es256
case "es384":
*rcv = Es384
case "es512":
*rcv = Es512
case "eddsa":
*rcv = Eddsa
case "es256k":
*rcv = Es256k
case "ecdsa":
*rcv = Ecdsa
default:
return fmt.Errorf(`illegal: "%s" is not a valid KeyAlgorithm`, str)
}
return nil
}


@ -1,58 +0,0 @@
// Code generated from Pkl module `sonr.orm.Models`. DO NOT EDIT.
package keycurve
import (
"encoding"
"fmt"
)
type KeyCurve string
const (
P256 KeyCurve = "p256"
P384 KeyCurve = "p384"
P521 KeyCurve = "p521"
X25519 KeyCurve = "x25519"
X448 KeyCurve = "x448"
Ed25519 KeyCurve = "ed25519"
Ed448 KeyCurve = "ed448"
Secp256k1 KeyCurve = "secp256k1"
Bls12381 KeyCurve = "bls12381"
Keccak256 KeyCurve = "keccak256"
)
// String returns the string representation of KeyCurve
func (rcv KeyCurve) String() string {
return string(rcv)
}
var _ encoding.BinaryUnmarshaler = new(KeyCurve)
// UnmarshalBinary implements encoding.BinaryUnmarshaler for KeyCurve.
func (rcv *KeyCurve) UnmarshalBinary(data []byte) error {
switch str := string(data); str {
case "p256":
*rcv = P256
case "p384":
*rcv = P384
case "p521":
*rcv = P521
case "x25519":
*rcv = X25519
case "x448":
*rcv = X448
case "ed25519":
*rcv = Ed25519
case "ed448":
*rcv = Ed448
case "secp256k1":
*rcv = Secp256k1
case "bls12381":
*rcv = Bls12381
case "keccak256":
*rcv = Keccak256
default:
return fmt.Errorf(`illegal: "%s" is not a valid KeyCurve`, str)
}
return nil
}


@ -1,37 +0,0 @@
// Code generated from Pkl module `sonr.orm.Models`. DO NOT EDIT.
package keyencoding
import (
"encoding"
"fmt"
)
type KeyEncoding string
const (
Raw KeyEncoding = "raw"
Hex KeyEncoding = "hex"
Multibase KeyEncoding = "multibase"
)
// String returns the string representation of KeyEncoding
func (rcv KeyEncoding) String() string {
return string(rcv)
}
var _ encoding.BinaryUnmarshaler = new(KeyEncoding)
// UnmarshalBinary implements encoding.BinaryUnmarshaler for KeyEncoding.
func (rcv *KeyEncoding) UnmarshalBinary(data []byte) error {
switch str := string(data); str {
case "raw":
*rcv = Raw
case "hex":
*rcv = Hex
case "multibase":
*rcv = Multibase
default:
return fmt.Errorf(`illegal: "%s" is not a valid KeyEncoding`, str)
}
return nil
}


@ -1,40 +0,0 @@
// Code generated from Pkl module `sonr.orm.Models`. DO NOT EDIT.
package keyrole
import (
"encoding"
"fmt"
)
type KeyRole string
const (
Authentication KeyRole = "authentication"
Assertion KeyRole = "assertion"
Delegation KeyRole = "delegation"
Invocation KeyRole = "invocation"
)
// String returns the string representation of KeyRole
func (rcv KeyRole) String() string {
return string(rcv)
}
var _ encoding.BinaryUnmarshaler = new(KeyRole)
// UnmarshalBinary implements encoding.BinaryUnmarshaler for KeyRole.
func (rcv *KeyRole) UnmarshalBinary(data []byte) error {
switch str := string(data); str {
case "authentication":
*rcv = Authentication
case "assertion":
*rcv = Assertion
case "delegation":
*rcv = Delegation
case "invocation":
*rcv = Invocation
default:
return fmt.Errorf(`illegal: "%s" is not a valid KeyRole`, str)
}
return nil
}


@ -1,34 +0,0 @@
// Code generated from Pkl module `sonr.orm.Models`. DO NOT EDIT.
package keysharerole
import (
"encoding"
"fmt"
)
type KeyShareRole string
const (
User KeyShareRole = "user"
Validator KeyShareRole = "validator"
)
// String returns the string representation of KeyShareRole
func (rcv KeyShareRole) String() string {
return string(rcv)
}
var _ encoding.BinaryUnmarshaler = new(KeyShareRole)
// UnmarshalBinary implements encoding.BinaryUnmarshaler for KeyShareRole.
func (rcv *KeyShareRole) UnmarshalBinary(data []byte) error {
switch str := string(data); str {
case "user":
*rcv = User
case "validator":
*rcv = Validator
default:
return fmt.Errorf(`illegal: "%s" is not a valid KeyShareRole`, str)
}
return nil
}


@ -1,55 +0,0 @@
// Code generated from Pkl module `sonr.orm.Models`. DO NOT EDIT.
package keytype
import (
"encoding"
"fmt"
)
type KeyType string
const (
Octet KeyType = "octet"
Elliptic KeyType = "elliptic"
Rsa KeyType = "rsa"
Symmetric KeyType = "symmetric"
Hmac KeyType = "hmac"
Mpc KeyType = "mpc"
Zk KeyType = "zk"
Webauthn KeyType = "webauthn"
Bip32 KeyType = "bip32"
)
// String returns the string representation of KeyType
func (rcv KeyType) String() string {
return string(rcv)
}
var _ encoding.BinaryUnmarshaler = new(KeyType)
// UnmarshalBinary implements encoding.BinaryUnmarshaler for KeyType.
func (rcv *KeyType) UnmarshalBinary(data []byte) error {
switch str := string(data); str {
case "octet":
*rcv = Octet
case "elliptic":
*rcv = Elliptic
case "rsa":
*rcv = Rsa
case "symmetric":
*rcv = Symmetric
case "hmac":
*rcv = Hmac
case "mpc":
*rcv = Mpc
case "zk":
*rcv = Zk
case "webauthn":
*rcv = Webauthn
case "bip32":
*rcv = Bip32
default:
return fmt.Errorf(`illegal: "%s" is not a valid KeyType`, str)
}
return nil
}


@ -1,46 +0,0 @@
// Code generated from Pkl module `sonr.orm.Models`. DO NOT EDIT.
package permissiongrant
import (
"encoding"
"fmt"
)
type PermissionGrant string
const (
None PermissionGrant = "none"
Read PermissionGrant = "read"
Write PermissionGrant = "write"
Verify PermissionGrant = "verify"
Broadcast PermissionGrant = "broadcast"
Admin PermissionGrant = "admin"
)
// String returns the string representation of PermissionGrant
func (rcv PermissionGrant) String() string {
return string(rcv)
}
var _ encoding.BinaryUnmarshaler = new(PermissionGrant)
// UnmarshalBinary implements encoding.BinaryUnmarshaler for PermissionGrant.
func (rcv *PermissionGrant) UnmarshalBinary(data []byte) error {
switch str := string(data); str {
case "none":
*rcv = None
case "read":
*rcv = Read
case "write":
*rcv = Write
case "verify":
*rcv = Verify
case "broadcast":
*rcv = Broadcast
case "admin":
*rcv = Admin
default:
return fmt.Errorf(`illegal: "%s" is not a valid PermissionGrant`, str)
}
return nil
}


@ -1,49 +0,0 @@
// Code generated from Pkl module `sonr.orm.Models`. DO NOT EDIT.
package permissionscope
import (
"encoding"
"fmt"
)
type PermissionScope string
const (
Profile PermissionScope = "profile"
Metadata PermissionScope = "metadata"
Permissions PermissionScope = "permissions"
Wallets PermissionScope = "wallets"
Transactions PermissionScope = "transactions"
User PermissionScope = "user"
Validator PermissionScope = "validator"
)
// String returns the string representation of PermissionScope
func (rcv PermissionScope) String() string {
return string(rcv)
}
var _ encoding.BinaryUnmarshaler = new(PermissionScope)
// UnmarshalBinary implements encoding.BinaryUnmarshaler for PermissionScope.
func (rcv *PermissionScope) UnmarshalBinary(data []byte) error {
switch str := string(data); str {
case "profile":
*rcv = Profile
case "metadata":
*rcv = Metadata
case "permissions":
*rcv = Permissions
case "wallets":
*rcv = Wallets
case "transactions":
*rcv = Transactions
case "user":
*rcv = User
case "validator":
*rcv = Validator
default:
return fmt.Errorf(`illegal: "%s" is not a valid PermissionScope`, str)
}
return nil
}


@ -0,0 +1,138 @@
-- name: InsertCredential :one
INSERT INTO credentials (
handle,
credential_id,
origin,
type,
transports
) VALUES ($1, $2, $3, $4, $5)
RETURNING *;
-- name: InsertProfile :one
INSERT INTO profiles (
address,
handle,
origin,
name
) VALUES ($1, $2, $3, $4)
RETURNING *;
-- name: GetProfileByID :one
SELECT * FROM profiles
WHERE id = $1 AND deleted_at IS NULL
LIMIT 1;
-- name: GetProfileByAddress :one
SELECT * FROM profiles
WHERE address = $1 AND deleted_at IS NULL
LIMIT 1;
-- name: GetChallengeBySessionID :one
SELECT challenge FROM sessions
WHERE id = $1 AND deleted_at IS NULL
LIMIT 1;
-- name: GetHumanVerificationNumbers :one
SELECT is_human_first, is_human_last FROM sessions
WHERE id = $1 AND deleted_at IS NULL
LIMIT 1;
-- name: GetSessionByID :one
SELECT * FROM sessions
WHERE id = $1 AND deleted_at IS NULL
LIMIT 1;
-- name: GetSessionByClientIP :one
SELECT * FROM sessions
WHERE client_ipaddr = $1 AND deleted_at IS NULL
LIMIT 1;
-- name: UpdateSessionHumanVerification :one
UPDATE sessions
SET
is_human_first = $1,
is_human_last = $2,
updated_at = CURRENT_TIMESTAMP
WHERE id = $3
RETURNING *;
-- name: UpdateSessionWithProfileID :one
UPDATE sessions
SET
profile_id = $1,
updated_at = CURRENT_TIMESTAMP
WHERE id = $2
RETURNING *;
-- name: CheckHandleExists :one
SELECT COUNT(*) > 0 as handle_exists FROM profiles
WHERE handle = $1
AND deleted_at IS NULL;
-- name: GetCredentialsByHandle :many
SELECT * FROM credentials
WHERE handle = $1
AND deleted_at IS NULL;
-- name: GetCredentialByID :one
SELECT * FROM credentials
WHERE credential_id = $1
AND deleted_at IS NULL
LIMIT 1;
-- name: SoftDeleteCredential :exec
UPDATE credentials
SET deleted_at = CURRENT_TIMESTAMP
WHERE credential_id = $1;
-- name: SoftDeleteProfile :exec
UPDATE profiles
SET deleted_at = CURRENT_TIMESTAMP
WHERE address = $1;
-- name: UpdateProfile :one
UPDATE profiles
SET
name = $1,
handle = $2,
updated_at = CURRENT_TIMESTAMP
WHERE address = $3
AND deleted_at IS NULL
RETURNING *;
-- name: GetProfileByHandle :one
SELECT * FROM profiles
WHERE handle = $1
AND deleted_at IS NULL
LIMIT 1;
-- name: GetVaultConfigByCID :one
SELECT * FROM vaults
WHERE cid = $1
AND deleted_at IS NULL
LIMIT 1;
-- name: GetVaultRedirectURIBySessionID :one
SELECT redirect_uri FROM vaults
WHERE session_id = $1
AND deleted_at IS NULL
LIMIT 1;
-- name: CreateSession :one
INSERT INTO sessions (
id,
browser_name,
browser_version,
client_ipaddr,
platform,
is_desktop,
is_mobile,
is_tablet,
is_tv,
is_bot,
challenge,
is_human_first,
is_human_last,
profile_id
) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14)
RETURNING *;
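
Each -- name: directive above becomes one method on the generated Queries type: :one scans a single row, :many returns a slice, and :exec returns only an error. A hypothetical caller of the uniqueness check (handle value illustrative):

exists, err := q.CheckHandleExists(ctx, "digitalgold")
if err != nil {
	return err
}
if exists {
	// surface a "handle already taken" error to the form
}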


@ -0,0 +1,121 @@
-- Profiles represent user identities
CREATE TABLE profiles (
id TEXT PRIMARY KEY,
created_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMPTZ,
address TEXT NOT NULL,
handle TEXT NOT NULL UNIQUE,
origin TEXT NOT NULL,
name TEXT NOT NULL,
UNIQUE(address, origin)
);
-- Accounts represent blockchain accounts
CREATE TABLE accounts (
id TEXT PRIMARY KEY,
created_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMPTZ,
number BIGINT NOT NULL,
sequence INT NOT NULL DEFAULT 0,
address TEXT NOT NULL UNIQUE,
public_key TEXT NOT NULL,
chain_id TEXT NOT NULL,
controller TEXT NOT NULL,
is_subsidiary BOOLEAN NOT NULL DEFAULT FALSE,
is_validator BOOLEAN NOT NULL DEFAULT FALSE,
is_delegator BOOLEAN NOT NULL DEFAULT FALSE,
is_accountable BOOLEAN NOT NULL DEFAULT TRUE
);
-- Assets represent tokens and coins
CREATE TABLE assets (
id TEXT PRIMARY KEY,
created_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMPTZ,
name TEXT NOT NULL,
symbol TEXT NOT NULL,
decimals INT NOT NULL CHECK(decimals >= 0),
chain_id TEXT NOT NULL,
channel TEXT NOT NULL,
asset_type TEXT NOT NULL,
coingecko_id TEXT,
UNIQUE(chain_id, symbol)
);
-- Credentials store WebAuthn credentials
CREATE TABLE credentials (
id TEXT PRIMARY KEY,
created_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMPTZ,
handle TEXT NOT NULL,
credential_id TEXT NOT NULL UNIQUE,
authenticator_attachment TEXT NOT NULL,
origin TEXT NOT NULL,
type TEXT NOT NULL,
transports TEXT NOT NULL
);
-- Sessions track user authentication state
CREATE TABLE sessions (
id TEXT PRIMARY KEY,
created_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMPTZ,
browser_name TEXT NOT NULL,
browser_version TEXT NOT NULL,
client_ipaddr TEXT NOT NULL,
platform TEXT NOT NULL,
is_desktop BOOLEAN NOT NULL DEFAULT FALSE,
is_mobile BOOLEAN NOT NULL DEFAULT FALSE,
is_tablet BOOLEAN NOT NULL DEFAULT FALSE,
is_tv BOOLEAN NOT NULL DEFAULT FALSE,
is_bot BOOLEAN NOT NULL DEFAULT FALSE,
challenge TEXT NOT NULL,
is_human_first BOOLEAN NOT NULL DEFAULT FALSE,
is_human_last BOOLEAN NOT NULL DEFAULT FALSE,
    profile_id TEXT NOT NULL REFERENCES profiles(id)
);
-- Vaults store encrypted data
CREATE TABLE vaults (
id BIGSERIAL PRIMARY KEY,
created_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMPTZ,
handle TEXT NOT NULL,
origin TEXT NOT NULL,
address TEXT NOT NULL,
cid TEXT NOT NULL UNIQUE,
config JSONB NOT NULL,
    session_id TEXT NOT NULL, -- must match sessions.id (TEXT) for the foreign key below
redirect_uri TEXT NOT NULL,
FOREIGN KEY (session_id) REFERENCES sessions(id)
);
-- Indexes for common queries
CREATE INDEX idx_profiles_handle ON profiles(handle);
CREATE INDEX idx_profiles_address ON profiles(address);
CREATE INDEX idx_profiles_deleted_at ON profiles(deleted_at);
CREATE INDEX idx_accounts_address ON accounts(address);
CREATE INDEX idx_accounts_chain_id ON accounts(chain_id);
CREATE INDEX idx_accounts_deleted_at ON accounts(deleted_at);
CREATE INDEX idx_assets_symbol ON assets(symbol);
CREATE INDEX idx_assets_chain_id ON assets(chain_id);
CREATE INDEX idx_assets_deleted_at ON assets(deleted_at);
CREATE INDEX idx_credentials_handle ON credentials(handle);
CREATE INDEX idx_credentials_origin ON credentials(origin);
CREATE INDEX idx_credentials_deleted_at ON credentials(deleted_at);
CREATE INDEX idx_sessions_profile_id ON sessions(profile_id);
CREATE INDEX idx_sessions_client_ipaddr ON sessions(client_ipaddr);
CREATE INDEX idx_sessions_deleted_at ON sessions(deleted_at);
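
Every table above shares the soft-delete convention: deleted_at starts NULL, deletion merely stamps it, and each read filters stamped rows out. Through the generated layer a delete followed by a read therefore simply misses; a sketch, with an illustrative address:

if err := q.SoftDeleteProfile(ctx, "sonr1exampleaddr"); err != nil {
	return err
}
if _, err := q.GetProfileByAddress(ctx, "sonr1exampleaddr"); err != nil {
	// with pgx/v5 this is pgx.ErrNoRows once the row is stamped
}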


@ -1,8 +1,8 @@
package sink
package sqlite
import (
_ "embed"
)
//go:embed schema.sql
var SchemaSQL string
var SchemaMotrSQL string

internal/models/sqlc.yaml (new file, 19 lines)

@ -0,0 +1,19 @@
version: "2"
sql:
- engine: "sqlite"
queries: "./sink/sqlite/query.sql"
schema: "./sink/sqlite/schema.sql"
gen:
go:
package: "motrorm"
out: "drivers/motrorm"
- engine: "postgresql"
queries: "./sink/postgres/query.sql"
schema: "./sink/postgres/schema.sql"
gen:
go:
package: "hwayorm"
out: "drivers/hwayorm"
sql_package: "pgx/v5"
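
The same model layer thus compiles twice: the sqlite rules feed motr through database/sql, while the postgresql rules feed hway through pgx/v5. A sketch of how the two generated constructors differ (the sqlite driver import and both DSNs are assumptions):

import (
	"context"
	"database/sql"

	"github.com/jackc/pgx/v5/pgxpool"
	_ "modernc.org/sqlite" // any database/sql sqlite driver would do

	"github.com/onsonr/sonr/internal/models/drivers/hwayorm"
	"github.com/onsonr/sonr/internal/models/drivers/motrorm"
)

func openBoth(ctx context.Context, sqlitePath, pgDSN string) (*motrorm.Queries, *hwayorm.Queries, error) {
	sqldb, err := sql.Open("sqlite", sqlitePath) // motrorm methods take database/sql handles
	if err != nil {
		return nil, nil, err
	}
	pool, err := pgxpool.New(ctx, pgDSN) // hwayorm methods take pgx DBTX implementations
	if err != nil {
		return nil, nil, err
	}
	return motrorm.New(sqldb), hwayorm.New(pool), nil
}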


@ -23,8 +23,8 @@ templ Handle() {
<br/>
}
templ HandleError(value string) {
<sl-input name="handle" placeholder="digitalgold" type="text" label="Handle" minlength="4" maxlength="12" required class="border-red-500" value={ value }>
templ HandleError(value string, helpText string) {
<sl-input name="handle" placeholder="digitalgold" type="text" label="Handle" minlength="4" maxlength="12" required class="border-red-500" value={ value } help-text={ helpText }>
<div slot="prefix">
<sl-icon name="at-sign" library="sonr"></sl-icon>
</div>


@ -49,7 +49,7 @@ func Handle() templ.Component {
})
}
func HandleError(value string) templ.Component {
func HandleError(value string, helpText string) templ.Component {
return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil {
@ -83,6 +83,19 @@ func HandleError(value string) templ.Component {
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("\" help-text=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var4 string
templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(helpText)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `internal/nebula/input/input_handle.templ`, Line: 27, Col: 175}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("\"><div slot=\"prefix\"><sl-icon name=\"at-sign\" library=\"sonr\"></sl-icon></div><div slot=\"suffix\" style=\"color: #B54549;\"><sl-icon name=\"x\"></sl-icon></div></sl-input><br>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
@ -107,21 +120,21 @@ func HandleSuccess(value string) templ.Component {
}()
}
ctx = templ.InitializeContext(ctx)
templ_7745c5c3_Var4 := templ.GetChildren(ctx)
if templ_7745c5c3_Var4 == nil {
templ_7745c5c3_Var4 = templ.NopComponent
templ_7745c5c3_Var5 := templ.GetChildren(ctx)
if templ_7745c5c3_Var5 == nil {
templ_7745c5c3_Var5 = templ.NopComponent
}
ctx = templ.ClearChildren(ctx)
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("<sl-input name=\"handle\" placeholder=\"digitalgold\" type=\"text\" label=\"Handle\" minlength=\"4\" maxlength=\"12\" required class=\"border-green-500\" value=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var5 string
templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(value)
var templ_7745c5c3_Var6 string
templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(value)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `internal/nebula/input/input_handle.templ`, Line: 39, Col: 154}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5))
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}


@ -6,7 +6,7 @@ import (
"github.com/labstack/echo/v4"
echomiddleware "github.com/labstack/echo/v4/middleware"
config "github.com/onsonr/sonr/internal/config/hway"
"github.com/onsonr/sonr/internal/database"
"github.com/onsonr/sonr/internal/models/drivers/hwayorm"
"github.com/onsonr/sonr/pkg/common"
"github.com/onsonr/sonr/pkg/gateway/middleware"
"github.com/onsonr/sonr/pkg/gateway/routes"
@ -15,11 +15,8 @@ import (
type Gateway = *echo.Echo
// New returns a new Gateway instance
func New(env config.Hway, ipc common.IPFS) (Gateway, error) {
db, err := database.NewDB(env)
if err != nil {
return nil, err
}
func New(env config.Hway, ipc common.IPFS, dbq *hwayorm.Queries) (Gateway, error) {
e := echo.New()
// Override default behaviors
e.IPExtractor = echo.ExtractIPDirect()
@ -29,7 +26,7 @@ func New(env config.Hway, ipc common.IPFS) (Gateway, error) {
e.Use(echoprometheus.NewMiddleware("hway"))
e.Use(echomiddleware.Logger())
e.Use(echomiddleware.Recover())
e.Use(middleware.UseGateway(env, ipc, db))
e.Use(middleware.UseGateway(env, ipc, dbq))
routes.Register(e)
return e, nil
}
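
With the database handle lifted out of New, the caller owns the pool and hands the gateway only the generated queries; a hypothetical bootstrap from the same package (the pgxpool import and DSN plumbing are assumptions):

func bootstrap(ctx context.Context, env config.Hway, ipc common.IPFS, dsn string) (Gateway, error) {
	pool, err := pgxpool.New(ctx, dsn)
	if err != nil {
		return nil, err
	}
	return New(env, ipc, hwayorm.New(pool))
}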


@ -3,13 +3,16 @@ package handlers
import (
"github.com/labstack/echo/v4"
"github.com/onsonr/sonr/pkg/gateway/middleware"
"github.com/onsonr/sonr/internal/nebula/input"
"github.com/onsonr/sonr/pkg/gateway/middleware"
)
// ValidateProfileHandle finds the chosen handle and verifies it is unique
func ValidateProfileHandle(c echo.Context) error {
// CheckProfileHandle finds the chosen handle and verifies it is unique
func CheckProfileHandle(c echo.Context) error {
handle := c.FormValue("handle")
if handle == "" {
return middleware.Render(c, input.HandleError(handle, "Please enter a valid handle"))
}
//
// if ok {
// 	return middleware.Render(c, input.HandleError(handle, "Handle is already taken"))
@ -19,7 +22,7 @@ func ValidateProfileHandle(c echo.Context) error {
}
// CheckIsHumanSum verifies that the submitted sum of the two human-verification numbers is correct
func ValidateIsHumanSum(c echo.Context) error {
func CheckIsHumanSum(c echo.Context) error {
// data := context.GetCreateProfileData(c)
// value := c.FormValue("is_human")
// intValue, err := strconv.Atoi(value)


@ -5,12 +5,24 @@ import (
"github.com/onsonr/sonr/pkg/gateway/middleware"
)
func RenderIndex(c echo.Context) error {
func HandleIndex(c echo.Context) error {
id := middleware.GetSessionID(c)
if id == "" {
return startNewSession(c)
}
return middleware.RenderInitial(c)
}
func startNewSession(c echo.Context) error {
// Initialize the session
err := middleware.NewSession(c)
if err != nil {
return middleware.RenderError(c, err)
}
// Render the initial view
return middleware.RenderInitial(c)
}
func continueExistingSession(c echo.Context, id string) error {
// Do some auth checks here
return middleware.RenderInitial(c)
}


@ -3,21 +3,20 @@ package handlers
import (
"github.com/labstack/echo/v4"
"github.com/onsonr/sonr/pkg/gateway/middleware"
"github.com/onsonr/sonr/pkg/gateway/types"
"github.com/onsonr/sonr/pkg/gateway/views"
)
func RenderProfileCreate(c echo.Context) error {
// numF, numL := middleware.GetHumanVerificationNumbers(c)
params := types.CreateProfileParams{
params := middleware.CreateProfileParams{
FirstNumber: int(middleware.CurrentBlock(c)),
LastNumber: int(middleware.CurrentBlock(c)),
}
return middleware.Render(c, views.RegisterProfileView(params))
return middleware.Render(c, views.RegisterProfileView(params.FirstNumber, params.LastNumber))
}
func RenderPasskeyCreate(c echo.Context) error {
return middleware.Render(c, views.RegisterPasskeyView(types.CreatePasskeyParams{}))
return middleware.Render(c, views.RegisterPasskeyView("", "", "", "", ""))
}
func RenderVaultLoading(c echo.Context) error {


@ -4,7 +4,6 @@ import (
"encoding/json"
"github.com/labstack/echo/v4"
"github.com/onsonr/sonr/pkg/gateway/types"
"github.com/onsonr/sonr/pkg/gateway/middleware"
)
@ -16,7 +15,7 @@ func SubmitProfileHandle(c echo.Context) error {
// SubmitPublicKeyCredential submits a public key credential
func SubmitPublicKeyCredential(c echo.Context) error {
credentialJSON := c.FormValue("credential")
cred := &types.CredentialDescriptor{}
cred := &middleware.CredentialDescriptor{}
// Unmarshal the credential JSON
if err := json.Unmarshal([]byte(credentialJSON), cred); err != nil {
return middleware.RenderError(c, err)


@ -4,11 +4,10 @@ import (
"net/http"
"github.com/labstack/echo/v4"
"github.com/onsonr/sonr/internal/database/repository"
"github.com/onsonr/sonr/pkg/gateway/types"
"github.com/onsonr/sonr/internal/models/drivers/hwayorm"
)
func ListCredentials(c echo.Context, handle string) ([]*types.CredentialDescriptor, error) {
func ListCredentials(c echo.Context, handle string) ([]*CredentialDescriptor, error) {
cc, ok := c.(*GatewayContext)
if !ok {
return nil, echo.NewHTTPError(http.StatusInternalServerError, "Credentials Context not found")
@ -17,10 +16,10 @@ func ListCredentials(c echo.Context, handle string) ([]*types.CredentialDescript
if err != nil {
return nil, err
}
return types.CredentialArrayToDescriptors(creds), nil
return CredentialArrayToDescriptors(creds), nil
}
func SubmitCredential(c echo.Context, cred *types.CredentialDescriptor) error {
func SubmitCredential(c echo.Context, cred *CredentialDescriptor) error {
origin := GetOrigin(c)
handle := GetHandle(c)
md := cred.ToModel(handle, origin)
@ -30,7 +29,7 @@ func SubmitCredential(c echo.Context, cred *types.CredentialDescriptor) error {
return echo.NewHTTPError(http.StatusInternalServerError, "Credentials Context not found")
}
_, err := cc.dbq.InsertCredential(bgCtx(), repository.InsertCredentialParams{
_, err := cc.dbq.InsertCredential(bgCtx(), hwayorm.InsertCredentialParams{
Handle: handle,
CredentialID: md.CredentialID,
Origin: origin,
@ -42,3 +41,43 @@ func SubmitCredential(c echo.Context, cred *types.CredentialDescriptor) error {
}
return nil
}
// Define the credential structure matching our frontend data
type CredentialDescriptor struct {
ID string `json:"id"`
RawID string `json:"rawId"`
Type string `json:"type"`
AuthenticatorAttachment string `json:"authenticatorAttachment"`
Transports string `json:"transports"`
ClientExtensionResults map[string]string `json:"clientExtensionResults"`
Response struct {
AttestationObject string `json:"attestationObject"`
ClientDataJSON string `json:"clientDataJSON"`
} `json:"response"`
}
func (c *CredentialDescriptor) ToModel(handle, origin string) *hwayorm.Credential {
return &hwayorm.Credential{
Handle: handle,
Origin: origin,
CredentialID: c.ID,
Type: c.Type,
Transports: c.Transports,
AuthenticatorAttachment: c.AuthenticatorAttachment,
}
}
func CredentialArrayToDescriptors(credentials []hwayorm.Credential) []*CredentialDescriptor {
var descriptors []*CredentialDescriptor
for _, cred := range credentials {
cd := &CredentialDescriptor{
ID: cred.CredentialID,
RawID: cred.CredentialID,
Type: cred.Type,
AuthenticatorAttachment: cred.AuthenticatorAttachment,
Transports: cred.Transports,
}
descriptors = append(descriptors, cd)
}
return descriptors
}
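
A sketch of the round trip from the frontend payload, reusing the file's encoding/json import; the JSON body, handle, and origin are illustrative:

func decodeCredential(raw string) (*hwayorm.Credential, error) {
	var cred CredentialDescriptor
	if err := json.Unmarshal([]byte(raw), &cred); err != nil {
		return nil, err
	}
	// In the handlers above, handle and origin come from the session cookie.
	return cred.ToModel("digitalgold", "sonr.id"), nil
}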


@ -1,13 +1,11 @@
package middleware
import (
"database/sql"
"github.com/labstack/echo/v4"
"github.com/medama-io/go-useragent"
"github.com/onsonr/sonr/crypto/mpc"
"github.com/onsonr/sonr/internal/config/hway"
"github.com/onsonr/sonr/internal/database/repository"
"github.com/onsonr/sonr/internal/models/drivers/hwayorm"
"github.com/onsonr/sonr/pkg/common"
)
@ -15,21 +13,21 @@ type GatewayContext struct {
echo.Context
agent useragent.UserAgent
id string
dbq *repository.Queries
dbq *hwayorm.Queries
ipfsClient common.IPFS
tokenStore common.IPFSTokenStore
stagedEnclaves map[string]mpc.Enclave
grpcAddr string
}
func UseGateway(env hway.Hway, ipc common.IPFS, db *sql.DB) echo.MiddlewareFunc {
func UseGateway(env hway.Hway, ipc common.IPFS, db *hwayorm.Queries) echo.MiddlewareFunc {
return func(next echo.HandlerFunc) echo.HandlerFunc {
return func(c echo.Context) error {
ua := useragent.NewParser()
ctx := &GatewayContext{
agent: ua.Parse(c.Request().UserAgent()),
Context: c,
dbq: repository.New(db),
dbq: db,
ipfsClient: ipc,
grpcAddr: env.GetSonrGrpcUrl(),
tokenStore: common.NewUCANStore(ipc),


@ -5,7 +5,7 @@ import (
"github.com/labstack/echo/v4"
"github.com/onsonr/sonr/internal/context"
"github.com/onsonr/sonr/internal/database/repository"
repository "github.com/onsonr/sonr/internal/models/drivers/hwayorm"
)
func CheckHandleUnique(c echo.Context, handle string) bool {
@ -98,3 +98,28 @@ func DeleteProfile(c echo.Context) error {
}
return nil
}
// ╭───────────────────────────────────────────────────────────╮
// │ Create Profile (/register/profile) │
// ╰───────────────────────────────────────────────────────────╯
// DefaultCreateProfileParams returns a default CreateProfileParams
func DefaultCreateProfileParams() CreateProfileParams {
return CreateProfileParams{
TurnstileSiteKey: "",
FirstNumber: 0,
LastNumber: 0,
}
}
// CreateProfileParams represents the parameters for creating a profile
type CreateProfileParams struct {
TurnstileSiteKey string
FirstNumber int
LastNumber int
}
// Sum returns the sum of the first and last number
func (d CreateProfileParams) Sum() int {
return d.FirstNumber + d.LastNumber
}


@ -3,9 +3,12 @@ package middleware
import (
gocontext "context"
"github.com/go-webauthn/webauthn/protocol"
"github.com/labstack/echo/v4"
"github.com/onsonr/sonr/internal/context"
"github.com/onsonr/sonr/internal/database"
"github.com/medama-io/go-useragent"
ctx "github.com/onsonr/sonr/internal/context"
"github.com/onsonr/sonr/internal/models/drivers/hwayorm"
"github.com/segmentio/ksuid"
)
func NewSession(c echo.Context) error {
@ -13,13 +16,13 @@ func NewSession(c echo.Context) error {
if !ok {
return nil
}
baseSessionCreateParams := database.BaseSessionCreateParams(cc)
baseSessionCreateParams := BaseSessionCreateParams(cc)
cc.id = baseSessionCreateParams.ID
if _, err := cc.dbq.CreateSession(bgCtx(), baseSessionCreateParams); err != nil {
return err
}
// Set Cookie
if err := context.WriteCookie(c, context.SessionID, cc.id); err != nil {
if err := ctx.WriteCookie(c, ctx.SessionID, cc.id); err != nil {
return err
}
return nil
@ -46,10 +49,10 @@ func GetSessionID(c echo.Context) string {
}
// check from cookie
if cc.id == "" {
if ok := context.CookieExists(c, context.SessionID); !ok {
if ok := ctx.CookieExists(c, ctx.SessionID); !ok {
return ""
}
cc.id = context.ReadCookieUnsafe(c, context.SessionID)
cc.id = ctx.ReadCookieUnsafe(c, ctx.SessionID)
}
return cc.id
}
@ -68,7 +71,7 @@ func GetSessionChallenge(c echo.Context) string {
func GetHandle(c echo.Context) string {
// First check for the cookie
handle := context.ReadCookieUnsafe(c, context.UserHandle)
handle := ctx.ReadCookieUnsafe(c, ctx.UserHandle)
if handle != "" {
return handle
}
@ -107,3 +110,50 @@ func bgCtx() gocontext.Context {
ctx := gocontext.Background()
return ctx
}
func BaseSessionCreateParams(e echo.Context) hwayorm.CreateSessionParams {
// f := rand.Intn(5) + 1
// l := rand.Intn(4) + 1
challenge, _ := protocol.CreateChallenge()
id := getOrCreateSessionID(e)
ua := useragent.NewParser()
s := ua.Parse(e.Request().UserAgent())
return hwayorm.CreateSessionParams{
ID: id,
BrowserName: s.GetBrowser(),
BrowserVersion: s.GetMajorVersion(),
ClientIpaddr: e.RealIP(),
Platform: s.GetOS(),
IsMobile: s.IsMobile(),
IsTablet: s.IsTablet(),
IsDesktop: s.IsDesktop(),
IsBot: s.IsBot(),
IsTv: s.IsTV(),
// IsHumanFirst: int64(f),
// IsHumanLast: int64(l),
Challenge: challenge.String(),
}
}
func getOrCreateSessionID(c echo.Context) string {
if ok := ctx.CookieExists(c, ctx.SessionID); !ok {
sessionID := ksuid.New().String()
ctx.WriteCookie(c, ctx.SessionID, sessionID)
return sessionID
}
sessionID, err := ctx.ReadCookie(c, ctx.SessionID)
if err != nil {
sessionID = ksuid.New().String()
ctx.WriteCookie(c, ctx.SessionID, sessionID)
}
return sessionID
}
func boolToInt64(b bool) int64 {
if b {
return 1
}
return 0
}


@ -6,11 +6,10 @@ import (
"github.com/labstack/echo/v4"
"github.com/onsonr/sonr/crypto/mpc"
"github.com/onsonr/sonr/internal/context"
"github.com/onsonr/sonr/pkg/gateway/types"
"lukechampine.com/blake3"
)
func Spawn(c echo.Context) (types.CreatePasskeyParams, error) {
func Spawn(c echo.Context) (CreatePasskeyParams, error) {
cc := c.(*GatewayContext)
block := fmt.Sprintf("%d", CurrentBlock(c))
handle := GetHandle(c)
@ -19,15 +18,15 @@ func Spawn(c echo.Context) (types.CreatePasskeyParams, error) {
sid := GetSessionID(c)
nonce, err := calcNonce(sid)
if err != nil {
return types.DefaultCreatePasskeyParams(), err
return defaultCreatePasskeyParams(), err
}
encl, err := mpc.GenEnclave(nonce)
if err != nil {
return types.DefaultCreatePasskeyParams(), err
return defaultCreatePasskeyParams(), err
}
cc.stagedEnclaves[sid] = encl
context.WriteCookie(c, context.SonrAddress, encl.Address())
return types.CreatePasskeyParams{
return CreatePasskeyParams{
Address: encl.Address(),
Handle: handle,
Name: origin,
@ -36,8 +35,8 @@ func Spawn(c echo.Context) (types.CreatePasskeyParams, error) {
}, nil
}
func Claim() (types.CreatePasskeyParams, error) {
return types.CreatePasskeyParams{}, nil
func Claim() (CreatePasskeyParams, error) {
return CreatePasskeyParams{}, nil
}
// Uses blake3 to hash the sessionID to generate a nonce of length 12 bytes
@ -55,3 +54,27 @@ func calcNonce(sessionID string) ([]byte, error) {
}
return nonce, nil
}
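
The hunk elides the body of calcNonce; a plausible implementation of the hashing described above, using the sized hasher from the file's lukechampine.com/blake3 import:

func calcNonce(sessionID string) ([]byte, error) {
	h := blake3.New(12, nil) // 12-byte output, unkeyed
	if _, err := h.Write([]byte(sessionID)); err != nil {
		return nil, err
	}
	return h.Sum(nil), nil
}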
// ╭───────────────────────────────────────────────────────────╮
// │ Create Passkey (/register/passkey) │
// ╰───────────────────────────────────────────────────────────╯
// defaultCreatePasskeyParams returns a default CreatePasskeyParams
func defaultCreatePasskeyParams() CreatePasskeyParams {
return CreatePasskeyParams{
Address: "",
Handle: "",
Name: "",
Challenge: "",
CreationBlock: "",
}
}
// CreatePasskeyParams represents the parameters for creating a passkey
type CreatePasskeyParams struct {
Address string
Handle string
Name string
Challenge string
CreationBlock string
}


@ -7,14 +7,14 @@ import (
func Register(e *echo.Echo) error {
// Register View Handlers
e.GET("/", handlers.RenderIndex)
e.GET("/", handlers.HandleIndex)
e.GET("/register", handlers.RenderProfileCreate)
e.POST("/register/passkey", handlers.RenderPasskeyCreate)
e.POST("/register/finish", handlers.RenderVaultLoading)
// Register Validation Handlers
e.POST("/register/profile/handle", handlers.ValidateProfileHandle)
e.POST("/register/profile/is_human", handlers.ValidateIsHumanSum)
e.POST("/register/profile/handle", handlers.CheckProfileHandle)
e.POST("/register/profile/is_human", handlers.CheckIsHumanSum)
e.POST("/submit/profile/handle", handlers.SubmitProfileHandle)
e.POST("/submit/credential", handlers.SubmitPublicKeyCredential)
return nil


@ -1,50 +0,0 @@
package types
// ╭───────────────────────────────────────────────────────────╮
// │ Create Passkey (/register/passkey) │
// ╰───────────────────────────────────────────────────────────╯
// DefaultCreatePasskeyParams returns a default CreatePasskeyParams
func DefaultCreatePasskeyParams() CreatePasskeyParams {
return CreatePasskeyParams{
Address: "",
Handle: "",
Name: "",
Challenge: "",
CreationBlock: "",
}
}
// CreatePasskeyParams represents the parameters for creating a passkey
type CreatePasskeyParams struct {
Address string
Handle string
Name string
Challenge string
CreationBlock string
}
// ╭───────────────────────────────────────────────────────────╮
// │ Create Profile (/register/profile) │
// ╰───────────────────────────────────────────────────────────╯
// DefaultCreateProfileParams returns a default CreateProfileParams
func DefaultCreateProfileParams() CreateProfileParams {
return CreateProfileParams{
TurnstileSiteKey: "",
FirstNumber: 0,
LastNumber: 0,
}
}
// CreateProfileParams represents the parameters for creating a profile
type CreateProfileParams struct {
TurnstileSiteKey string
FirstNumber int
LastNumber int
}
// Sum returns the sum of the first and last number
func (d CreateProfileParams) Sum() int {
return d.FirstNumber + d.LastNumber
}


@ -1,43 +0,0 @@
package types
import "github.com/onsonr/sonr/internal/database/repository"
// Define the credential structure matching our frontend data
type CredentialDescriptor struct {
ID string `json:"id"`
RawID string `json:"rawId"`
Type string `json:"type"`
AuthenticatorAttachment string `json:"authenticatorAttachment"`
Transports string `json:"transports"`
ClientExtensionResults map[string]string `json:"clientExtensionResults"`
Response struct {
AttestationObject string `json:"attestationObject"`
ClientDataJSON string `json:"clientDataJSON"`
} `json:"response"`
}
func (c *CredentialDescriptor) ToModel(handle, origin string) *repository.Credential {
return &repository.Credential{
Handle: handle,
Origin: origin,
CredentialID: c.ID,
Type: c.Type,
Transports: c.Transports,
AuthenticatorAttachment: c.AuthenticatorAttachment,
}
}
func CredentialArrayToDescriptors(credentials []repository.Credential) []*CredentialDescriptor {
var descriptors []*CredentialDescriptor
for _, cred := range credentials {
cd := &CredentialDescriptor{
ID: cred.CredentialID,
RawID: cred.CredentialID,
Type: cred.Type,
AuthenticatorAttachment: cred.AuthenticatorAttachment,
Transports: cred.Transports,
}
descriptors = append(descriptors, cd)
}
return descriptors
}


@ -1,7 +1,6 @@
package views
import (
"github.com/onsonr/sonr/pkg/gateway/types"
"github.com/onsonr/sonr/internal/nebula/card"
"github.com/onsonr/sonr/internal/nebula/form"
"github.com/onsonr/sonr/internal/nebula/hero"
@ -9,7 +8,7 @@ import (
"github.com/onsonr/sonr/internal/nebula/layout"
)
templ RegisterProfileView(data types.CreateProfileParams) {
templ RegisterProfileView(firstNumber int, lastNumber int) {
@layout.View("New Profile | Sonr.ID") {
@layout.Container() {
@hero.TitleDesc("Basic Info", "Tell us a little about yourself.")
@ -23,7 +22,7 @@ templ RegisterProfileView(data types.CreateProfileParams) {
}
@input.Handle()
@input.Name()
@input.HumanSlider(data.FirstNumber, data.LastNumber)
@input.HumanSlider(firstNumber, lastNumber)
@form.Footer() {
@form.CancelButton()
@form.SubmitButton("Next")
@ -34,7 +33,7 @@ templ RegisterProfileView(data types.CreateProfileParams) {
}
}
templ RegisterPasskeyView(data types.CreatePasskeyParams) {
templ RegisterPasskeyView(address string, handle string, name string, challenge string, creationBlock string) {
@layout.View("Register | Sonr.ID") {
@layout.Container() {
@hero.TitleDesc("Link a PassKey", "This will be used to login to your vault.")
@ -42,11 +41,11 @@ templ RegisterPasskeyView(data types.CreatePasskeyParams) {
<input type="hidden" name="credential" id="credential-data" required/>
@form.Body() {
@form.Header() {
@card.SonrProfile(data.Address, data.Name, data.Handle, data.CreationBlock)
@card.SonrProfile(address, name, handle, creationBlock)
}
@input.CoinSelect()
@form.Footer() {
@input.Passkey(data.Address, data.Handle, data.Challenge)
@input.Passkey(address, handle, challenge)
@form.CancelButton()
}
}


@ -14,10 +14,9 @@ import (
"github.com/onsonr/sonr/internal/nebula/hero"
"github.com/onsonr/sonr/internal/nebula/input"
"github.com/onsonr/sonr/internal/nebula/layout"
"github.com/onsonr/sonr/pkg/gateway/types"
)
func RegisterProfileView(data types.CreateProfileParams) templ.Component {
func RegisterProfileView(firstNumber int, lastNumber int) templ.Component {
return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil {
@ -136,7 +135,7 @@ func RegisterProfileView(data types.CreateProfileParams) templ.Component {
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = input.HumanSlider(data.FirstNumber, data.LastNumber).Render(ctx, templ_7745c5c3_Buffer)
templ_7745c5c3_Err = input.HumanSlider(firstNumber, lastNumber).Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
@ -202,7 +201,7 @@ func RegisterProfileView(data types.CreateProfileParams) templ.Component {
})
}
func RegisterPasskeyView(data types.CreatePasskeyParams) templ.Component {
func RegisterPasskeyView(address string, handle string, name string, challenge string, creationBlock string) templ.Component {
return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil {
@ -295,7 +294,7 @@ func RegisterPasskeyView(data types.CreatePasskeyParams) templ.Component {
}()
}
ctx = templ.InitializeContext(ctx)
templ_7745c5c3_Err = card.SonrProfile(data.Address, data.Name, data.Handle, data.CreationBlock).Render(ctx, templ_7745c5c3_Buffer)
templ_7745c5c3_Err = card.SonrProfile(address, name, handle, creationBlock).Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
@ -329,7 +328,7 @@ func RegisterPasskeyView(data types.CreatePasskeyParams) templ.Component {
}()
}
ctx = templ.InitializeContext(ctx)
templ_7745c5c3_Err = input.Passkey(data.Address, data.Handle, data.Challenge).Render(ctx, templ_7745c5c3_Buffer)
templ_7745c5c3_Err = input.Passkey(address, handle, challenge).Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}


@ -30,9 +30,6 @@ class Config {
@JsonField
sonrChainId: String
@JsonField
vaultSchema: Schema
}
class Environment {
@ -52,30 +49,3 @@ class Environment {
wasmPath: String
}
class Schema {
version: Int
@JsonField
account: String
@JsonField
asset: String
@JsonField
chain: String
@JsonField
credential: String
@JsonField
jwk: String
@JsonField
grant: String
@JsonField
keyshare: String
@JsonField
profile: String
}

Some files were not shown because too many files have changed in this diff.