feature/simplify ucan mpc did (#1195)

* feat: enable DID auth middleware

* feat: implement passkey creation flow

* feat: persist user address in cookie and retrieve user profile using address cookie

* feat: implement human verification challenge during session initialization

* refactor: remove unnecessary random number generation in profile creation

* refactor: rename credential validation handler and update related routes

* feat: improve profile validation and user experience

* feat: add page rendering for profile and passkey creation

* refactor: remove unused register handler and update routes

* refactor: remove unused imports and simplify credential validation

* fix: Correct insecure gRPC client connection

* refactor: rename models files for better organization

* refactor: refactor grpc client creation and management

* refactor: refactor common clients package

* feat: add CapAccount, CapInterchain, CapVault enums

* feat: add ChainId to ResAccount and ResInterchain

* feat: add asset code to resource account enumeration

* refactor: rename services package to providers

* feat: implement gateway database interactions

* refactor: move gateway repository to internal/gateway

* refactor: Migrate database provider to use sqlx

* refactor: Rename Vaults to VaultProvider in HTTPContext struct

* refactor: Migrate from GORM to sqlc Queries in database context methods

* refactor: Replace GORM with standard SQL and simplify database initialization

* refactor: Migrate session management from GORM to sqlc with type conversion

* refactor: Update import paths and model references in context package

* fix: Resolve session type conversion and middleware issues

* refactor: Migrate database from GORM to sqlx

* refactor: Move models to pkg/common, improve code structure

* refactor: move repository package to internal directory

* refactor: move gateway internal packages to context directory

* refactor: migrate database provider to use sqlx queries

* feat: add session ID to HTTP context and use it to load session data

* feat: implement vault creation API endpoint

* feat: add DIDKey generation from PubKey

* refactor: remove unused DIDAuth components

* refactor: move DID auth controller to vault context

* chore: remove unused DIDAuth package

* refactor: improve clarity of enclave refresh function

* feat: implement nonce-based key encryption for improved security

* feat: Add Export and Import methods with comprehensive tests for Enclave

* fix: Validate AES key length in keyshare encryption and decryption

* fix: Resolve key length validation by hashing input keys

* refactor: Update keyshare import to use protocol decoding

* feat: Refactor enclave encryption to support full enclave export/import

* refactor: Simplify Enclave interface methods by removing role parameter

* refactor: remove unnecessary serialization from enclave interface

* refactor: rename models package in gateway context

* refactor: rename keystore vault constants

* refactor: remove context parameter from Resolver methods

* feat: add CurrentBlock context function and update related components

* refactor: rename resolver.go to resolvers.go

* feat: Add SQLite random() generation for session and profile initialization

* refactor: Update SQL queries to use SQLite-style parameter placeholders

* refactor: Replace '?' placeholders with '$n' PostgreSQL parameter syntax

* refactor: refactor gateway to use middleware for database interactions and improve modularity

* feat: implement gateway for Sonr highway

* refactor: Remove unused gateway context and refactor cookie/header handling

* refactor: improve server initialization and middleware handling

* feat: implement human verification for profile creation

* feat: implement session management middleware

* refactor: refactor common models and config to internal package

* refactor: move env config to internal/config

* refactor: move database-related code to  directory

* refactor: move IPFS client to common package and improve code structure

* refactor: move querier to common package and rename to chain_query

* refactor: move webworker model to internal/models

* feat: add initial view template for Sonr.ID

* docs(concepts): Add documentation for cosmos-proto

* docs: move IBC transfer documentation to tools section

* refactor: rename initpkl.go to pkl_init.go for better naming consistency

* docs(theme): update dark mode toggle icons

* refactor: update sqlite3 driver to ncruces/go-sqlite3

* feat: add Vault model and database interactions

* refactor: Improve SQLite schema with better constraints and indexes

* chore: update project dependencies

* fix: use grpc.WithInsecure() for gRPC connection

* config: set localhost as default Sonr gRPC URL

* refactor: improve gateway middleware and refactor server initialization

* refactor: Remove foreign key pragma from schema SQL

* refactor: Remove foreign key constraints from database schema

* refactor: Convert primary key columns from INTEGER to TEXT

* refactor: Remove unnecessary redirect in error handling
Prad Nukala 2024-12-16 15:29:54 -05:00 committed by GitHub
parent 6d27b926f6
commit 7c4586ce90
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
196 changed files with 4480 additions and 3192 deletions

5
.gitignore vendored
View File

@ -1,4 +1,5 @@
# Binaries # Binaries
no
.data .data
schemas schemas
*.db *.db
@ -95,4 +96,8 @@ sonr.wiki
!buf.lock !buf.lock
.air.toml .air.toml
mprocs.yaml
mprocs.log
tools-stamp
sonr.log

View File

@ -306,7 +306,7 @@ sh-testnet: mod-tidy
############################################################################### ###############################################################################
### generation ### ### generation ###
############################################################################### ###############################################################################
.PHONY: gen-pkl gen-templ .PHONY: gen-pkl gen-templ gen-sqlc
gen-pkl: init-env gen-pkl: init-env
pkl-gen-go pkl/sonr.orm/UCAN.pkl pkl-gen-go pkl/sonr.orm/UCAN.pkl
@ -314,8 +314,11 @@ gen-pkl: init-env
pkl-gen-go pkl/sonr.net/Hway.pkl pkl-gen-go pkl/sonr.net/Hway.pkl
pkl-gen-go pkl/sonr.net/Motr.pkl pkl-gen-go pkl/sonr.net/Motr.pkl
gen-sqlc: init-env
@cd internal/database && sqlc generate
gen-templ: init-env gen-templ: init-env
templ generate @templ generate
############################################################################### ###############################################################################

View File

@ -6,7 +6,6 @@
[![Static Badge](https://img.shields.io/badge/homepage-sonr.io-blue?style=flat-square)](https://sonr.io) [![Static Badge](https://img.shields.io/badge/homepage-sonr.io-blue?style=flat-square)](https://sonr.io)
[![Go Report Card](https://goreportcard.com/badge/github.com/onsonr/sonr)](https://goreportcard.com/report/github.com/onsonr/sonr) [![Go Report Card](https://goreportcard.com/badge/github.com/onsonr/sonr)](https://goreportcard.com/report/github.com/onsonr/sonr)
[![Security Rating](https://sonarcloud.io/api/project_badges/measure?project=sonrhq_sonr&metric=security_rating)](https://sonarcloud.io/summary/new_code?id=sonr-io_sonr) [![Security Rating](https://sonarcloud.io/api/project_badges/measure?project=sonrhq_sonr&metric=security_rating)](https://sonarcloud.io/summary/new_code?id=sonr-io_sonr)
[![Latest version of 'sonrd' @ Cloudsmith](https://api-prd.cloudsmith.io/v1/badges/version/sonr/sonr/deb/sonrd/latest/a=amd64;xc=main;d=ubuntu%252Fxenial;t=binary/?render=true&show_latest=true)](https://cloudsmith.io/~sonr/repos/sonr/packages/detail/deb/sonrd/latest/a=amd64;xc=main;d=ubuntu%252Fxenial;t=binary/)
> Sonr is a combination of decentralized primitives. Fundamentally, it is a peer-to-peer identity and asset management system that leverages DID documents, Webauthn, and IPFS—providing users with a secure, portable decentralized identity. > Sonr is a combination of decentralized primitives. Fundamentally, it is a peer-to-peer identity and asset management system that leverages DID documents, Webauthn, and IPFS—providing users with a secure, portable decentralized identity.

View File

@ -6,6 +6,8 @@ import (
"net/http" "net/http"
"os" "os"
"github.com/onsonr/sonr/pkg/common"
"github.com/onsonr/sonr/pkg/gateway"
"github.com/spf13/cobra" "github.com/spf13/cobra"
) )
@ -18,11 +20,10 @@ var (
sonrGrpcURL string // Sonr gRPC URL (default localhost:9090) sonrGrpcURL string // Sonr gRPC URL (default localhost:9090)
sonrRPCURL string // Sonr RPC URL (default localhost:26657) sonrRPCURL string // Sonr RPC URL (default localhost:26657)
sqliteFile string // SQLite database file (default hway.db) psqlHost string // PostgresSQL Host Flag
psqlHost string // PostgresSQL Host Flag psqlUser string // PostgresSQL User Flag
psqlUser string // PostgresSQL User Flag psqlPass string // PostgresSQL Password Flag
psqlPass string // PostgresSQL Password Flag psqlDB string // PostgresSQL Database Flag
psqlDB string // PostgresSQL Database Flag
) )
func rootCmd() *cobra.Command { func rootCmd() *cobra.Command {
@ -34,11 +35,11 @@ func rootCmd() *cobra.Command {
if err != nil { if err != nil {
panic(err) panic(err)
} }
db, ipc, err := initDeps(env) ipc, err := common.NewIPFS()
if err != nil { if err != nil {
panic(err) panic(err)
} }
e, err := setupServer(env, db, ipc) e, err := gateway.New(env, ipc)
if err != nil { if err != nil {
panic(err) panic(err)
} }
@ -55,7 +56,6 @@ func rootCmd() *cobra.Command {
cmd.Flags().StringVar(&sonrAPIURL, "sonr-api-url", "localhost:1317", "Sonr API URL") cmd.Flags().StringVar(&sonrAPIURL, "sonr-api-url", "localhost:1317", "Sonr API URL")
cmd.Flags().StringVar(&sonrGrpcURL, "sonr-grpc-url", "localhost:9090", "Sonr gRPC URL") cmd.Flags().StringVar(&sonrGrpcURL, "sonr-grpc-url", "localhost:9090", "Sonr gRPC URL")
cmd.Flags().StringVar(&sonrRPCURL, "sonr-rpc-url", "localhost:26657", "Sonr RPC URL") cmd.Flags().StringVar(&sonrRPCURL, "sonr-rpc-url", "localhost:26657", "Sonr RPC URL")
cmd.Flags().StringVar(&sqliteFile, "sqlite-file", "hway.db", "File to store sqlite database")
cmd.Flags().StringVar(&psqlHost, "psql-host", "", "PostgresSQL Host") cmd.Flags().StringVar(&psqlHost, "psql-host", "", "PostgresSQL Host")
cmd.Flags().StringVar(&psqlUser, "psql-user", "", "PostgresSQL User") cmd.Flags().StringVar(&psqlUser, "psql-user", "", "PostgresSQL User")
cmd.Flags().StringVar(&psqlPass, "psql-pass", "", "PostgresSQL Password") cmd.Flags().StringVar(&psqlPass, "psql-pass", "", "PostgresSQL Password")

View File

@ -5,15 +5,7 @@ import (
"fmt" "fmt"
"os" "os"
"github.com/labstack/echo-contrib/echoprometheus" config "github.com/onsonr/sonr/internal/config/hway"
"github.com/labstack/echo/v4"
"github.com/labstack/echo/v4/middleware"
"github.com/onsonr/sonr/crypto/ucan"
"github.com/onsonr/sonr/internal/gateway"
config "github.com/onsonr/sonr/pkg/config/hway"
"github.com/onsonr/sonr/pkg/didauth/producer"
"github.com/onsonr/sonr/pkg/ipfsapi"
"gorm.io/gorm"
) )
// main is the entry point for the application // main is the entry point for the application
@ -26,20 +18,6 @@ func main() {
os.Exit(0) os.Exit(0)
} }
func initDeps(env config.Hway) (*gorm.DB, ipfsapi.Client, error) {
db, err := gateway.NewDB(env)
if err != nil {
return nil, nil, err
}
ipc, err := ipfsapi.NewClient()
if err != nil {
return nil, nil, err
}
return db, ipc, nil
}
func loadEnvImplFromArgs(args []string) (config.Hway, error) { func loadEnvImplFromArgs(args []string) (config.Hway, error) {
cmd := rootCmd() cmd := rootCmd()
if err := cmd.ParseFlags(args); err != nil { if err := cmd.ParseFlags(args); err != nil {
@ -48,7 +26,6 @@ func loadEnvImplFromArgs(args []string) (config.Hway, error) {
env := &config.HwayImpl{ env := &config.HwayImpl{
ServePort: servePort, ServePort: servePort,
SqliteFile: sqliteFile,
ChainId: chainID, ChainId: chainID,
IpfsGatewayUrl: ipfsGatewayURL, IpfsGatewayUrl: ipfsGatewayURL,
SonrApiUrl: sonrAPIURL, SonrApiUrl: sonrAPIURL,
@ -58,15 +35,3 @@ func loadEnvImplFromArgs(args []string) (config.Hway, error) {
} }
return env, nil return env, nil
} }
// setupServer sets up the server
func setupServer(env config.Hway, db *gorm.DB, ipc ipfsapi.Client) (*echo.Echo, error) {
e := echo.New()
e.Use(echoprometheus.NewMiddleware("hway"))
e.IPExtractor = echo.ExtractIPDirect()
e.Use(middleware.Logger())
e.Use(middleware.Recover())
e.Use(producer.Middleware(ipc, ucan.ServicePermissions))
gateway.RegisterRoutes(e, env, db)
return e, nil
}

View File

@ -8,10 +8,9 @@ import (
"syscall/js" "syscall/js"
"github.com/labstack/echo/v4" "github.com/labstack/echo/v4"
"github.com/onsonr/sonr/internal/vault" "github.com/onsonr/sonr/pkg/vault/routes"
"github.com/onsonr/sonr/pkg/common/wasm" "github.com/onsonr/sonr/cmd/motr/wasm"
"github.com/onsonr/sonr/pkg/config/motr" "github.com/onsonr/sonr/internal/config/motr"
// "github.com/onsonr/sonr/pkg/didauth/controller"
) )
var ( var (
@ -28,7 +27,7 @@ func simulateTx(this js.Value, args []js.Value) interface{} {
return nil return nil
} }
func processConfig(this js.Value, args []js.Value) interface{} { func syncData(this js.Value, args []js.Value) interface{} {
if len(args) < 1 { if len(args) < 1 {
return nil return nil
} }
@ -45,7 +44,7 @@ func main() {
// Load dwn config // Load dwn config
js.Global().Set("broadcastTx", js.FuncOf(broadcastTx)) js.Global().Set("broadcastTx", js.FuncOf(broadcastTx))
js.Global().Set("simulateTx", js.FuncOf(simulateTx)) js.Global().Set("simulateTx", js.FuncOf(simulateTx))
js.Global().Set("processConfig", js.FuncOf(processConfig)) js.Global().Set("syncData", js.FuncOf(syncData))
e := echo.New() e := echo.New()
e.Use(wasm.ContextMiddleware) e.Use(wasm.ContextMiddleware)

View File

@ -38,6 +38,11 @@ func NewDID(pub crypto.PubKey) (DID, error) {
} }
} }
// NewFromPubKey constructs an Identifier from a public key
func NewFromPubKey(pub PubKey) DID {
return DID{PubKey: pub}
}
// MulticodecType indicates the type for this multicodec // MulticodecType indicates the type for this multicodec
func (id DID) MulticodecType() uint64 { func (id DID) MulticodecType() uint64 {
switch id.Type() { switch id.Type() {
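The new NewFromPubKey constructor lets callers wrap an already-derived public key in a DID value instead of re-running NewDID against a libp2p key. A minimal sketch, assuming only the keys package shown above; the function name didFromEnclaveKey is illustrative:

package example

import "github.com/onsonr/sonr/crypto/keys"

// didFromEnclaveKey wraps an existing keys.PubKey in a DID identifier,
// which is what the new Enclave.DID() accessor relies on.
func didFromEnclaveKey(pub keys.PubKey) keys.DID {
	return keys.NewFromPubKey(pub)
}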

View File

@ -1,16 +1,21 @@
package keys package keys
import ( import (
"bytes"
"crypto/ecdsa" "crypto/ecdsa"
"encoding/hex" "encoding/hex"
p2pcrypto "github.com/libp2p/go-libp2p/core/crypto"
p2ppb "github.com/libp2p/go-libp2p/core/crypto/pb"
"github.com/onsonr/sonr/crypto/core/curves" "github.com/onsonr/sonr/crypto/core/curves"
"golang.org/x/crypto/sha3" "golang.org/x/crypto/sha3"
) )
type PubKey interface { type PubKey interface {
Bytes() []byte Bytes() []byte
Type() string Raw() ([]byte, error)
Equals(b p2pcrypto.Key) bool
Type() p2ppb.KeyType
Hex() string Hex() string
Verify(msg []byte, sig []byte) (bool, error) Verify(msg []byte, sig []byte) (bool, error)
} }
@ -30,12 +35,31 @@ func (p pubKey) Bytes() []byte {
return p.publicPoint.ToAffineCompressed() return p.publicPoint.ToAffineCompressed()
} }
func (p pubKey) Raw() ([]byte, error) {
return p.publicPoint.ToAffineCompressed(), nil
}
func (p pubKey) Equals(b p2pcrypto.Key) bool {
if b == nil {
return false
}
apbz, err := b.Raw()
if err != nil {
return false
}
bbz, err := p.Raw()
if err != nil {
return false
}
return bytes.Equal(apbz, bbz)
}
func (p pubKey) Hex() string { func (p pubKey) Hex() string {
return hex.EncodeToString(p.publicPoint.ToAffineCompressed()) return hex.EncodeToString(p.publicPoint.ToAffineCompressed())
} }
func (p pubKey) Type() string { func (p pubKey) Type() p2ppb.KeyType {
return "secp256k1" return p2ppb.KeyType_Secp256k1
} }
func (p pubKey) Verify(data []byte, sigBz []byte) (bool, error) { func (p pubKey) Verify(data []byte, sigBz []byte) (bool, error) {

View File

@ -3,6 +3,7 @@ package mpc
import ( import (
"github.com/onsonr/sonr/crypto/core/curves" "github.com/onsonr/sonr/crypto/core/curves"
"github.com/onsonr/sonr/crypto/core/protocol" "github.com/onsonr/sonr/crypto/core/protocol"
"github.com/onsonr/sonr/crypto/keys"
"github.com/onsonr/sonr/crypto/tecdsa/dklsv1/dkg" "github.com/onsonr/sonr/crypto/tecdsa/dklsv1/dkg"
) )
@ -20,3 +21,22 @@ type (
RefreshFunc interface{ protocol.Iterator } // RefreshFunc is the type for the refresh function RefreshFunc interface{ protocol.Iterator } // RefreshFunc is the type for the refresh function
SignFunc interface{ protocol.Iterator } // SignFunc is the type for the sign function SignFunc interface{ protocol.Iterator } // SignFunc is the type for the sign function
) )
const (
RoleVal = "validator"
RoleUser = "user"
)
// Enclave defines the interface for key management operations
type Enclave interface {
Address() string // Address returns the Sonr address of the keyEnclave
DID() keys.DID // DID returns the DID of the keyEnclave
Export(key []byte) ([]byte, error) // Export returns encrypted enclave data
Import(data []byte, key []byte) error // Import decrypts and loads enclave data
IsValid() bool // IsValid returns true if the keyEnclave is valid
PubKey() keys.PubKey // PubKey returns the public key of the keyEnclave
Refresh() (Enclave, error) // Refresh returns a new keyEnclave
Serialize() ([]byte, error) // Serialize returns the serialized keyEnclave
Sign(data []byte) ([]byte, error) // Sign returns the signature of the data
Verify(data []byte, sig []byte) (bool, error) // Verify returns true if the signature is valid
}
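The Enclave interface now covers the full lifecycle: generation, signing, verification, and encrypted export/import. A minimal usage sketch, assuming the crypto/mpc import path and the GenEnclave constructor shown in this change; the message and export key values are illustrative:

package main

import (
	"crypto/rand"
	"fmt"

	"github.com/onsonr/sonr/crypto/mpc"
)

func main() {
	// 12-byte nonce stored with the enclave and reused for AES-GCM export/import.
	nonce := make([]byte, 12)
	if _, err := rand.Read(nonce); err != nil {
		panic(err)
	}

	enclave, err := mpc.GenEnclave(nonce)
	if err != nil {
		panic(err)
	}

	// Sign with both MPC shares, then verify against the derived public key.
	msg := []byte("hello sonr")
	sig, err := enclave.Sign(msg)
	if err != nil {
		panic(err)
	}
	ok, _ := enclave.Verify(msg, sig)
	fmt.Println("valid signature:", ok)

	// Export encrypts the serialized enclave; the key is hashed to 32 bytes internally.
	exportKey := []byte("any-length-export-key")
	blob, err := enclave.Export(exportKey)
	if err != nil {
		panic(err)
	}
	if err := enclave.Import(blob, exportKey); err != nil {
		panic(err)
	}
	fmt.Println("address:", enclave.Address())
}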

View File

@ -1,6 +1,7 @@
package mpc package mpc
import ( import (
"crypto/rand"
"strings" "strings"
"testing" "testing"
@ -8,22 +9,74 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
func randNonce() []byte {
nonce := make([]byte, 12)
rand.Read(nonce)
return nonce
}
func TestKeyShareGeneration(t *testing.T) { func TestKeyShareGeneration(t *testing.T) {
t.Run("Generate Valid Enclave", func(t *testing.T) { t.Run("Generate Valid Enclave", func(t *testing.T) {
nonce := randNonce()
// Generate enclave // Generate enclave
enclave, err := GenEnclave() enclave, err := GenEnclave(nonce)
require.NoError(t, err) require.NoError(t, err)
require.NotNil(t, enclave) require.NotNil(t, enclave)
// Validate enclave contents // Validate enclave contents
assert.True(t, enclave.IsValid()) assert.True(t, enclave.IsValid())
}) })
t.Run("Export and Import", func(t *testing.T) {
nonce := randNonce()
// Generate original enclave
original, err := GenEnclave(nonce)
require.NoError(t, err)
// Test key for encryption/decryption (32 bytes)
testKey := []byte("test-key-12345678-test-key-123456")
// Test Export/Import
t.Run("Full Enclave", func(t *testing.T) {
// Export enclave
data, err := original.Export(testKey)
require.NoError(t, err)
require.NotEmpty(t, data)
// Create new empty enclave
newEnclave, err := GenEnclave(nonce)
require.NoError(t, err)
// Import enclave
err = newEnclave.Import(data, testKey)
require.NoError(t, err)
// Verify the imported enclave works by signing
testData := []byte("test message")
sig, err := newEnclave.Sign(testData)
require.NoError(t, err)
valid, err := newEnclave.Verify(testData, sig)
require.NoError(t, err)
assert.True(t, valid)
})
// Test Invalid Key
t.Run("Invalid Key", func(t *testing.T) {
data, err := original.Export(testKey)
require.NoError(t, err)
wrongKey := []byte("wrong-key-12345678")
err = original.Import(data, wrongKey)
assert.Error(t, err)
})
})
} }
func TestEnclaveOperations(t *testing.T) { func TestEnclaveOperations(t *testing.T) {
t.Run("Signing and Verification", func(t *testing.T) { t.Run("Signing and Verification", func(t *testing.T) {
nonce := randNonce()
// Generate valid enclave // Generate valid enclave
enclave, err := GenEnclave() enclave, err := GenEnclave(nonce)
require.NoError(t, err) require.NoError(t, err)
// Test signing // Test signing
@ -45,7 +98,8 @@ func TestEnclaveOperations(t *testing.T) {
}) })
t.Run("Address and Public Key", func(t *testing.T) { t.Run("Address and Public Key", func(t *testing.T) {
enclave, err := GenEnclave() nonce := randNonce()
enclave, err := GenEnclave(nonce)
require.NoError(t, err) require.NoError(t, err)
// Test Address // Test Address
@ -60,17 +114,18 @@ func TestEnclaveOperations(t *testing.T) {
}) })
t.Run("Refresh Operation", func(t *testing.T) { t.Run("Refresh Operation", func(t *testing.T) {
enclave, err := GenEnclave() nonce := randNonce()
enclave, err := GenEnclave(nonce)
require.NoError(t, err) require.NoError(t, err)
// Test refresh // Test refresh
refreshedEnclave, err := enclave.Refresh() refreshedEnclave, err := enclave.Refresh()
require.NoError(t, err) require.NoError(t, err)
require.NotNil(t, refreshedEnclave) require.NotNil(t, refreshedEnclave)
// Verify refreshed enclave is valid // Verify refreshed enclave is valid
assert.True(t, refreshedEnclave.IsValid()) assert.True(t, refreshedEnclave.IsValid())
// Verify it maintains the same address // Verify it maintains the same address
assert.Equal(t, enclave.Address(), refreshedEnclave.Address()) assert.Equal(t, enclave.Address(), refreshedEnclave.Address())
}) })
@ -78,28 +133,28 @@ func TestEnclaveOperations(t *testing.T) {
func TestEnclaveSerialization(t *testing.T) { func TestEnclaveSerialization(t *testing.T) {
t.Run("Marshal and Unmarshal", func(t *testing.T) { t.Run("Marshal and Unmarshal", func(t *testing.T) {
nonce := randNonce()
// Generate original enclave // Generate original enclave
original, err := GenEnclave() original, err := GenEnclave(nonce)
require.NoError(t, err) require.NoError(t, err)
require.NotNil(t, original) require.NotNil(t, original)
// Marshal // Marshal
keyEnclave, ok := original.(*KeyEnclave) keyclave, ok := original.(*keyEnclave)
require.True(t, ok) require.True(t, ok)
data, err := keyEnclave.Marshal() data, err := keyclave.Serialize()
require.NoError(t, err) require.NoError(t, err)
require.NotEmpty(t, data) require.NotEmpty(t, data)
// Unmarshal // Unmarshal
restored := &KeyEnclave{} restored := &keyEnclave{}
err = restored.Unmarshal(data) err = restored.Unmarshal(data)
require.NoError(t, err) require.NoError(t, err)
// Verify restored enclave // Verify restored enclave
assert.Equal(t, keyEnclave.Addr, restored.Addr) assert.Equal(t, keyclave.Addr, restored.Addr)
assert.True(t, keyEnclave.PubPoint.Equal(restored.PubPoint)) assert.True(t, keyclave.PubPoint.Equal(restored.PubPoint))
assert.Equal(t, keyEnclave.VaultCID, restored.VaultCID)
assert.True(t, restored.IsValid()) assert.True(t, restored.IsValid())
}) })
} }

View File

@ -1,63 +1,31 @@
package mpc package mpc
import ( import (
"crypto/aes"
"crypto/cipher"
"crypto/ecdsa" "crypto/ecdsa"
"encoding/json" "encoding/json"
"fmt"
"github.com/onsonr/sonr/crypto/core/curves" "github.com/onsonr/sonr/crypto/core/curves"
"github.com/onsonr/sonr/crypto/core/protocol"
"github.com/onsonr/sonr/crypto/keys" "github.com/onsonr/sonr/crypto/keys"
"github.com/onsonr/sonr/crypto/tecdsa/dklsv1"
"golang.org/x/crypto/sha3" "golang.org/x/crypto/sha3"
) )
// Enclave defines the interface for key management operations // keyEnclave implements the Enclave interface
type Enclave interface { type keyEnclave struct {
Address() string // Serialized fields
IsValid() bool
PubKey() keys.PubKey
Refresh() (Enclave, error)
Sign(data []byte) ([]byte, error)
Verify(data []byte, sig []byte) (bool, error)
}
// KeyEnclave implements the Enclave interface
type KeyEnclave struct {
Addr string `json:"address"` Addr string `json:"address"`
PubPoint curves.Point `json:"-"` PubPoint curves.Point `json:"-"`
PubBytes []byte `json:"pub_key"` PubBytes []byte `json:"pub_key"`
ValShare Message `json:"val_share"` ValShare Message `json:"val_share"`
UserShare Message `json:"user_share"` UserShare Message `json:"user_share"`
VaultCID string `json:"vault_cid,omitempty"`
// Extra fields
nonce []byte
} }
// Marshal returns the JSON encoding of KeyEnclave func newEnclave(valShare, userShare Message, nonce []byte) (Enclave, error) {
func (k *KeyEnclave) Marshal() ([]byte, error) {
// Store compressed public point bytes before marshaling
k.PubBytes = k.PubPoint.ToAffineCompressed()
return json.Marshal(k)
}
// Unmarshal parses the JSON-encoded data and stores the result
func (k *KeyEnclave) Unmarshal(data []byte) error {
if err := json.Unmarshal(data, k); err != nil {
return err
}
// Reconstruct Point from bytes
curve := curves.K256()
point, err := curve.NewIdentityPoint().FromAffineCompressed(k.PubBytes)
if err != nil {
return err
}
k.PubPoint = point
return nil
}
func (k *KeyEnclave) IsValid() bool {
return k.PubPoint != nil && k.ValShare != nil && k.UserShare != nil && k.Addr != ""
}
func initKeyEnclave(valShare, userShare Message) (*KeyEnclave, error) {
pubPoint, err := getAlicePubPoint(valShare) pubPoint, err := getAlicePubPoint(valShare)
if err != nil { if err != nil {
return nil, err return nil, err
@ -67,47 +35,105 @@ func initKeyEnclave(valShare, userShare Message) (*KeyEnclave, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
return &KeyEnclave{ return &keyEnclave{
Addr: addr, Addr: addr,
PubPoint: pubPoint, PubPoint: pubPoint,
ValShare: valShare, ValShare: valShare,
UserShare: userShare, UserShare: userShare,
nonce: nonce,
}, nil }, nil
} }
func (k *KeyEnclave) Address() string { // Address returns the Sonr address of the keyEnclave
func (k *keyEnclave) Address() string {
return k.Addr return k.Addr
} }
func (k *KeyEnclave) PubKey() keys.PubKey { // DID returns the DID of the keyEnclave
func (k *keyEnclave) DID() keys.DID {
return keys.NewFromPubKey(k.PubKey())
}
// Export returns encrypted enclave data
func (k *keyEnclave) Export(key []byte) ([]byte, error) {
data, err := k.Serialize()
if err != nil {
return nil, fmt.Errorf("failed to serialize enclave: %w", err)
}
hashedKey := hashKey(key)
block, err := aes.NewCipher(hashedKey)
if err != nil {
return nil, err
}
aesgcm, err := cipher.NewGCM(block)
if err != nil {
return nil, err
}
return aesgcm.Seal(nil, k.nonce, data, nil), nil
}
// Import decrypts and loads enclave data
func (k *keyEnclave) Import(data []byte, key []byte) error {
hashedKey := hashKey(key)
block, err := aes.NewCipher(hashedKey)
if err != nil {
return err
}
aesgcm, err := cipher.NewGCM(block)
if err != nil {
return err
}
decrypted, err := aesgcm.Open(nil, k.nonce, data, nil)
if err != nil {
return err
}
return k.Unmarshal(decrypted)
}
// IsValid returns true if the keyEnclave is valid
func (k *keyEnclave) IsValid() bool {
return k.PubPoint != nil && k.ValShare != nil && k.UserShare != nil && k.Addr != ""
}
// PubKey returns the public key of the keyEnclave
func (k *keyEnclave) PubKey() keys.PubKey {
return keys.NewPubKey(k.PubPoint) return keys.NewPubKey(k.PubPoint)
} }
func (k *KeyEnclave) Refresh() (Enclave, error) { // Refresh returns a new keyEnclave
refreshFuncVal, err := k.valRefreshFunc() func (k *keyEnclave) Refresh() (Enclave, error) {
refreshFuncVal, err := valRefreshFunc(k)
if err != nil { if err != nil {
return nil, err return nil, err
} }
refreshFuncUser, err := k.userRefreshFunc() refreshFuncUser, err := userRefreshFunc(k)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return ExecuteRefresh(refreshFuncVal, refreshFuncUser) return ExecuteRefresh(refreshFuncVal, refreshFuncUser, k.nonce)
} }
func (k *KeyEnclave) Sign(data []byte) ([]byte, error) { // Sign returns the signature of the data
userSign, err := k.userSignFunc(data) func (k *keyEnclave) Sign(data []byte) ([]byte, error) {
userSign, err := userSignFunc(k, data)
if err != nil { if err != nil {
return nil, err return nil, err
} }
valSign, err := k.valSignFunc(data) valSign, err := valSignFunc(k, data)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return ExecuteSigning(valSign, userSign) return ExecuteSigning(valSign, userSign)
} }
func (k *KeyEnclave) Verify(data []byte, sig []byte) (bool, error) { // Verify returns true if the signature is valid
func (k *keyEnclave) Verify(data []byte, sig []byte) (bool, error) {
edSig, err := deserializeSignature(sig) edSig, err := deserializeSignature(sig)
if err != nil { if err != nil {
return false, err return false, err
@ -121,31 +147,33 @@ func (k *KeyEnclave) Verify(data []byte, sig []byte) (bool, error) {
X: ePub.X, X: ePub.X,
Y: ePub.Y, Y: ePub.Y,
} }
// Hash the message using SHA3-256 // Hash the message using SHA3-256
hash := sha3.New256() hash := sha3.New256()
hash.Write(data) hash.Write(data)
digest := hash.Sum(nil) digest := hash.Sum(nil)
return ecdsa.Verify(pk, digest, edSig.R, edSig.S), nil return ecdsa.Verify(pk, digest, edSig.R, edSig.S), nil
} }
func (k *KeyEnclave) userSignFunc(bz []byte) (SignFunc, error) { // Marshal returns the JSON encoding of keyEnclave
curve := curves.K256() func (k *keyEnclave) Serialize() ([]byte, error) {
return dklsv1.NewBobSign(curve, sha3.New256(), bz, k.UserShare, protocol.Version1) // Store compressed public point bytes before marshaling
k.PubBytes = k.PubPoint.ToAffineCompressed()
return json.Marshal(k)
} }
func (k *KeyEnclave) userRefreshFunc() (RefreshFunc, error) { // Unmarshal parses the JSON-encoded data and stores the result
func (k *keyEnclave) Unmarshal(data []byte) error {
if err := json.Unmarshal(data, k); err != nil {
return err
}
// Reconstruct Point from bytes
curve := curves.K256() curve := curves.K256()
return dklsv1.NewBobRefresh(curve, k.UserShare, protocol.Version1) point, err := curve.NewIdentityPoint().FromAffineCompressed(k.PubBytes)
} if err != nil {
return err
func (k *KeyEnclave) valSignFunc(bz []byte) (SignFunc, error) { }
curve := curves.K256() k.PubPoint = point
return dklsv1.NewAliceSign(curve, sha3.New256(), bz, k.ValShare, protocol.Version1) return nil
}
func (k *KeyEnclave) valRefreshFunc() (RefreshFunc, error) {
curve := curves.K256()
return dklsv1.NewAliceRefresh(curve, k.ValShare, protocol.Version1)
} }

View File

@ -1,14 +1,13 @@
package mpc package mpc
import ( import (
"github.com/ipfs/kubo/client/rpc"
"github.com/onsonr/sonr/crypto/core/curves" "github.com/onsonr/sonr/crypto/core/curves"
"github.com/onsonr/sonr/crypto/core/protocol" "github.com/onsonr/sonr/crypto/core/protocol"
"github.com/onsonr/sonr/crypto/tecdsa/dklsv1" "github.com/onsonr/sonr/crypto/tecdsa/dklsv1"
) )
// GenEnclave generates a new MPC keyshare // GenEnclave generates a new MPC keyshare
func GenEnclave() (Enclave, error) { func GenEnclave(nonce []byte) (Enclave, error) {
curve := curves.K256() curve := curves.K256()
valKs := dklsv1.NewAliceDkg(curve, protocol.Version1) valKs := dklsv1.NewAliceDkg(curve, protocol.Version1)
userKs := dklsv1.NewBobDkg(curve, protocol.Version1) userKs := dklsv1.NewBobDkg(curve, protocol.Version1)
@ -24,31 +23,7 @@ func GenEnclave() (Enclave, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
return initKeyEnclave(valRes, userRes) return newEnclave(valRes, userRes, nonce)
}
// GenEnclaveIPFS generates a new MPC keyshare
func GenEnclaveIPFS(ipc *rpc.HttpApi) (Enclave, error) {
curve := curves.K256()
valKs := dklsv1.NewAliceDkg(curve, protocol.Version1)
userKs := dklsv1.NewBobDkg(curve, protocol.Version1)
aErr, bErr := RunProtocol(userKs, valKs)
if err := checkIteratedErrors(aErr, bErr); err != nil {
return nil, err
}
valRes, err := valKs.Result(protocol.Version1)
if err != nil {
return nil, err
}
userRes, err := userKs.Result(protocol.Version1)
if err != nil {
return nil, err
}
e, err := initKeyEnclave(valRes, userRes)
if err != nil {
return nil, err
}
return addEnclaveIPFS(e, ipc)
} }
// ExecuteSigning runs the MPC signing protocol // ExecuteSigning runs the MPC signing protocol
@ -73,7 +48,7 @@ func ExecuteSigning(signFuncVal SignFunc, signFuncUser SignFunc) ([]byte, error)
} }
// ExecuteRefresh runs the MPC refresh protocol // ExecuteRefresh runs the MPC refresh protocol
func ExecuteRefresh(refreshFuncVal RefreshFunc, refreshFuncUser RefreshFunc) (*KeyEnclave, error) { func ExecuteRefresh(refreshFuncVal RefreshFunc, refreshFuncUser RefreshFunc, nonce []byte) (Enclave, error) {
aErr, bErr := RunProtocol(refreshFuncVal, refreshFuncUser) aErr, bErr := RunProtocol(refreshFuncVal, refreshFuncUser)
if err := checkIteratedErrors(aErr, bErr); err != nil { if err := checkIteratedErrors(aErr, bErr); err != nil {
return nil, err return nil, err
@ -86,7 +61,7 @@ func ExecuteRefresh(refreshFuncVal RefreshFunc, refreshFuncUser RefreshFunc) (*K
if err != nil { if err != nil {
return nil, err return nil, err
} }
return initKeyEnclave(valRefreshResult, userRefreshResult) return newEnclave(valRefreshResult, userRefreshResult, nonce)
} }
// For DKG bob starts first. For refresh and sign, Alice starts first. // For DKG bob starts first. For refresh and sign, Alice starts first.

View File

@ -57,9 +57,10 @@ func (k ucanKeyshare) ChainCode() ([]byte, error) {
// DefaultOriginToken returns a default token with the keyshare's issuer as the audience // DefaultOriginToken returns a default token with the keyshare's issuer as the audience
func (k ucanKeyshare) OriginToken() (*Token, error) { func (k ucanKeyshare) OriginToken() (*Token, error) {
att := ucan.NewSmartAccount(k.addr) // att := ucan.NewSmartAccount(k.addr)
zero := time.Time{} zero := time.Time{}
return k.NewOriginToken(k.issuerDID, att, nil, zero, zero) // return k.NewOriginToken(k.issuerDID, att, nil, zero, zero)
return k.newToken(k.issuerDID, nil, nil, nil, zero, zero)
} }
func (k ucanKeyshare) SignData(data []byte) ([]byte, error) { func (k ucanKeyshare) SignData(data []byte) ([]byte, error) {
@ -101,7 +102,6 @@ func (k ucanKeyshare) UCANParser() *ucan.TokenParser {
if key == ucan.CapKey { if key == ucan.CapKey {
cap = val cap = val
} else { } else {
rsc = ucan.NewStringLengthResource(key, val)
} }
} }

View File

@ -1,34 +1,19 @@
package mpc package mpc
import ( import (
"context" "crypto/aes"
"encoding/json" "crypto/cipher"
"errors" "errors"
"fmt" "fmt"
"math/big" "math/big"
"github.com/cosmos/cosmos-sdk/types/bech32" "github.com/cosmos/cosmos-sdk/types/bech32"
"github.com/ipfs/boxo/files"
"github.com/ipfs/kubo/client/rpc"
"github.com/onsonr/sonr/crypto/core/curves" "github.com/onsonr/sonr/crypto/core/curves"
"github.com/onsonr/sonr/crypto/core/protocol" "github.com/onsonr/sonr/crypto/core/protocol"
"github.com/onsonr/sonr/crypto/tecdsa/dklsv1" "github.com/onsonr/sonr/crypto/tecdsa/dklsv1"
"golang.org/x/crypto/sha3"
) )
func addEnclaveIPFS(enclave *KeyEnclave, ipc *rpc.HttpApi) (Enclave, error) {
jsonEnclave, err := json.Marshal(enclave)
if err != nil {
return nil, err
}
// Save enclave to IPFS
cid, err := ipc.Unixfs().Add(context.Background(), files.NewBytesFile(jsonEnclave))
if err != nil {
return nil, err
}
enclave.VaultCID = cid.String()
return enclave, nil
}
func checkIteratedErrors(aErr, bErr error) error { func checkIteratedErrors(aErr, bErr error) error {
if aErr == protocol.ErrProtocolFinished && bErr == protocol.ErrProtocolFinished { if aErr == protocol.ErrProtocolFinished && bErr == protocol.ErrProtocolFinished {
return nil return nil
@ -51,6 +36,47 @@ func computeSonrAddr(pp Point) (string, error) {
return sonrAddr, nil return sonrAddr, nil
} }
func hashKey(key []byte) []byte {
hash := sha3.New256()
hash.Write(key)
return hash.Sum(nil)[:32] // Use first 32 bytes of hash
}
func decryptKeyshare(msg []byte, key []byte, nonce []byte) ([]byte, error) {
hashedKey := hashKey(key)
block, err := aes.NewCipher(hashedKey)
if err != nil {
return nil, err
}
aesgcm, err := cipher.NewGCM(block)
if err != nil {
return nil, err
}
plaintext, err := aesgcm.Open(nil, nonce, msg, nil)
if err != nil {
return nil, err
}
return plaintext, nil
}
func encryptKeyshare(msg Message, key []byte, nonce []byte) ([]byte, error) {
hashedKey := hashKey(key)
msgBytes, err := protocol.EncodeMessage(msg)
if err != nil {
return nil, err
}
block, err := aes.NewCipher(hashedKey)
if err != nil {
return nil, err
}
aesgcm, err := cipher.NewGCM(block)
if err != nil {
return nil, err
}
ciphertext := aesgcm.Seal(nil, nonce, []byte(msgBytes), nil)
return ciphertext, nil
}
func getAliceOut(msg *protocol.Message) (AliceOut, error) { func getAliceOut(msg *protocol.Message) (AliceOut, error) {
return dklsv1.DecodeAliceDkgResult(msg) return dklsv1.DecodeAliceDkgResult(msg)
} }
@ -122,3 +148,23 @@ func deserializeSignature(sigBytes []byte) (*curves.EcdsaSignature, error) {
S: s, S: s,
}, nil }, nil
} }
func userSignFunc(k *keyEnclave, bz []byte) (SignFunc, error) {
curve := curves.K256()
return dklsv1.NewBobSign(curve, sha3.New256(), bz, k.UserShare, protocol.Version1)
}
func userRefreshFunc(k *keyEnclave) (RefreshFunc, error) {
curve := curves.K256()
return dklsv1.NewBobRefresh(curve, k.UserShare, protocol.Version1)
}
func valSignFunc(k *keyEnclave, bz []byte) (SignFunc, error) {
curve := curves.K256()
return dklsv1.NewAliceSign(curve, sha3.New256(), bz, k.ValShare, protocol.Version1)
}
func valRefreshFunc(k *keyEnclave) (RefreshFunc, error) {
curve := curves.K256()
return dklsv1.NewAliceRefresh(curve, k.ValShare, protocol.Version1)
}
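The hashKey helper is what the earlier "Resolve key length validation by hashing input keys" commit refers to: any caller-supplied key is reduced to a 32-byte value before it reaches aes.NewCipher, so AES-256-GCM never sees an invalid key length. A small standalone sketch of the same idea (the helper is re-declared here only so the example compiles on its own):

package main

import (
	"crypto/aes"
	"fmt"

	"golang.org/x/crypto/sha3"
)

// hashKey mirrors the helper above: SHA3-256 maps any input to a 32-byte AES-256 key.
func hashKey(key []byte) []byte {
	h := sha3.New256()
	h.Write(key)
	return h.Sum(nil)[:32]
}

func main() {
	inputs := [][]byte{
		[]byte("tiny"),
		[]byte("a much longer passphrase that is not a legal AES key size on its own"),
	}
	for _, k := range inputs {
		hashed := hashKey(k)
		if _, err := aes.NewCipher(hashed); err != nil {
			panic(err) // unreachable: 32 bytes is always a valid AES-256 key size
		}
		fmt.Printf("input %d bytes -> AES key %d bytes\n", len(k), len(hashed))
	}
}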

View File

@ -73,32 +73,6 @@ type Resource interface {
Contains(b Resource) bool Contains(b Resource) bool
} }
type stringLengthRsc struct {
t string
v string
}
// NewStringLengthResource is a silly implementation of resource to use while
// I figure out what an OR filter on strings is. Don't use this.
func NewStringLengthResource(typ, val string) Resource {
return stringLengthRsc{
t: typ,
v: val,
}
}
func (r stringLengthRsc) Type() string {
return r.t
}
func (r stringLengthRsc) Value() string {
return r.v
}
func (r stringLengthRsc) Contains(b Resource) bool {
return r.Type() == b.Type() && len(r.Value()) <= len(b.Value())
}
// Capability is an action users can perform // Capability is an action users can perform
type Capability interface { type Capability interface {
// A Capability must be expressable as a string // A Capability must be expressable as a string

View File

@ -1,96 +0,0 @@
package ucan
import (
"encoding/json"
"fmt"
"testing"
)
func TestAttenuationsContains(t *testing.T) {
aContains := [][2]string{
{
`[
{ "cap": "SUPER_USER", "dataset": "b5/world_bank_population"},
{ "cap": "OVERWRITE", "api": "https://api.qri.cloud" }
]`,
`[
{"cap": "SOFT_DELETE", "dataset": "b5/world_bank_population" }
]`,
},
{
`[
{ "cap": "SUPER_USER", "dataset": "b5/world_bank_population"},
{ "cap": "OVERWRITE", "api": "https://api.qri.cloud" }
]`,
`[
{"cap": "SUPER_USER", "dataset": "b5/world_bank_population" }
]`,
},
}
for i, c := range aContains {
t.Run(fmt.Sprintf("contains_%d", i), func(t *testing.T) {
a := testAttenuations(c[0])
b := testAttenuations(c[1])
if !a.Contains(b) {
t.Errorf("expected a attenuations to contain b attenuations")
}
})
}
aNotContains := [][2]string{
{
`[
{ "cap": "SUPER_USER", "dataset": "b5/world_bank_population"},
{ "cap": "OVERWRITE", "api": "https://api.qri.cloud" }
]`,
`[
{ "cap": "CREATE", "dataset": "b5" }
]`,
},
}
for i, c := range aNotContains {
t.Run(fmt.Sprintf("not_contains_%d", i), func(t *testing.T) {
a := testAttenuations(c[0])
b := testAttenuations(c[1])
if a.Contains(b) {
t.Errorf("expected a attenuations to NOT contain b attenuations")
}
})
}
}
func mustJSON(data string, v interface{}) {
if err := json.Unmarshal([]byte(data), v); err != nil {
panic(err)
}
}
func testAttenuations(data string) Attenuations {
caps := NewNestedCapabilities("SUPER_USER", "OVERWRITE", "SOFT_DELETE", "REVISE", "CREATE")
v := []map[string]string{}
mustJSON(data, &v)
var att Attenuations
for _, x := range v {
var cap Capability
var rsc Resource
for key, val := range x {
switch key {
case CapKey:
cap = caps.Cap(val)
default:
rsc = NewStringLengthResource(key, val)
}
}
att = append(att, Attenuation{cap, rsc})
}
return att
}
func TestNestedCapabilities(t *testing.T) {
}

View File

@ -1,79 +0,0 @@
// Code generated from Pkl module `sonr.orm.UCAN`. DO NOT EDIT.
package capability
import (
"encoding"
"fmt"
)
type Capability string
const (
CAPOWNER Capability = "CAP_OWNER"
CAPOPERATOR Capability = "CAP_OPERATOR"
CAPOBSERVER Capability = "CAP_OBSERVER"
CAPAUTHENTICATE Capability = "CAP_AUTHENTICATE"
CAPAUTHORIZE Capability = "CAP_AUTHORIZE"
CAPDELEGATE Capability = "CAP_DELEGATE"
CAPINVOKE Capability = "CAP_INVOKE"
CAPEXECUTE Capability = "CAP_EXECUTE"
CAPPROPOSE Capability = "CAP_PROPOSE"
CAPSIGN Capability = "CAP_SIGN"
CAPSETPOLICY Capability = "CAP_SET_POLICY"
CAPSETTHRESHOLD Capability = "CAP_SET_THRESHOLD"
CAPRECOVER Capability = "CAP_RECOVER"
CAPSOCIAL Capability = "CAP_SOCIAL"
CAPVOTE Capability = "CAP_VOTE"
CAPRESOLVER Capability = "CAP_RESOLVER"
CAPPRODUCER Capability = "CAP_PRODUCER"
)
// String returns the string representation of Capability
func (rcv Capability) String() string {
return string(rcv)
}
var _ encoding.BinaryUnmarshaler = new(Capability)
// UnmarshalBinary implements encoding.BinaryUnmarshaler for Capability.
func (rcv *Capability) UnmarshalBinary(data []byte) error {
switch str := string(data); str {
case "CAP_OWNER":
*rcv = CAPOWNER
case "CAP_OPERATOR":
*rcv = CAPOPERATOR
case "CAP_OBSERVER":
*rcv = CAPOBSERVER
case "CAP_AUTHENTICATE":
*rcv = CAPAUTHENTICATE
case "CAP_AUTHORIZE":
*rcv = CAPAUTHORIZE
case "CAP_DELEGATE":
*rcv = CAPDELEGATE
case "CAP_INVOKE":
*rcv = CAPINVOKE
case "CAP_EXECUTE":
*rcv = CAPEXECUTE
case "CAP_PROPOSE":
*rcv = CAPPROPOSE
case "CAP_SIGN":
*rcv = CAPSIGN
case "CAP_SET_POLICY":
*rcv = CAPSETPOLICY
case "CAP_SET_THRESHOLD":
*rcv = CAPSETTHRESHOLD
case "CAP_RECOVER":
*rcv = CAPRECOVER
case "CAP_SOCIAL":
*rcv = CAPSOCIAL
case "CAP_VOTE":
*rcv = CAPVOTE
case "CAP_RESOLVER":
*rcv = CAPRESOLVER
case "CAP_PRODUCER":
*rcv = CAPPRODUCER
default:
return fmt.Errorf(`illegal: "%s" is not a valid Capability`, str)
}
return nil
}

View File

@ -0,0 +1,49 @@
// Code generated from Pkl module `sonr.orm.UCAN`. DO NOT EDIT.
package capaccount
import (
"encoding"
"fmt"
)
type CapAccount string
const (
ExecBroadcast CapAccount = "exec/broadcast"
ExecQuery CapAccount = "exec/query"
ExecSimulate CapAccount = "exec/simulate"
ExecVote CapAccount = "exec/vote"
ExecDelegate CapAccount = "exec/delegate"
ExecInvoke CapAccount = "exec/invoke"
ExecSend CapAccount = "exec/send"
)
// String returns the string representation of CapAccount
func (rcv CapAccount) String() string {
return string(rcv)
}
var _ encoding.BinaryUnmarshaler = new(CapAccount)
// UnmarshalBinary implements encoding.BinaryUnmarshaler for CapAccount.
func (rcv *CapAccount) UnmarshalBinary(data []byte) error {
switch str := string(data); str {
case "exec/broadcast":
*rcv = ExecBroadcast
case "exec/query":
*rcv = ExecQuery
case "exec/simulate":
*rcv = ExecSimulate
case "exec/vote":
*rcv = ExecVote
case "exec/delegate":
*rcv = ExecDelegate
case "exec/invoke":
*rcv = ExecInvoke
case "exec/send":
*rcv = ExecSend
default:
return fmt.Errorf(`illegal: "%s" is not a valid CapAccount`, str)
}
return nil
}

View File

@ -0,0 +1,11 @@
package capaccount
import "github.com/onsonr/sonr/crypto/ucan"
func NewCap(ty CapAccount) ucan.Capability {
return ucan.Capability(ty)
}
func (c CapAccount) Contains(b ucan.Capability) bool {
return c.String() == b.String()
}

View File

@ -0,0 +1,43 @@
// Code generated from Pkl module `sonr.orm.UCAN`. DO NOT EDIT.
package capinterchain
import (
"encoding"
"fmt"
)
type CapInterchain string
const (
TransferSwap CapInterchain = "transfer/swap"
TransferSend CapInterchain = "transfer/send"
TransferAtomic CapInterchain = "transfer/atomic"
TransferBatch CapInterchain = "transfer/batch"
TransferP2p CapInterchain = "transfer/p2p"
)
// String returns the string representation of CapInterchain
func (rcv CapInterchain) String() string {
return string(rcv)
}
var _ encoding.BinaryUnmarshaler = new(CapInterchain)
// UnmarshalBinary implements encoding.BinaryUnmarshaler for CapInterchain.
func (rcv *CapInterchain) UnmarshalBinary(data []byte) error {
switch str := string(data); str {
case "transfer/swap":
*rcv = TransferSwap
case "transfer/send":
*rcv = TransferSend
case "transfer/atomic":
*rcv = TransferAtomic
case "transfer/batch":
*rcv = TransferBatch
case "transfer/p2p":
*rcv = TransferP2p
default:
return fmt.Errorf(`illegal: "%s" is not a valid CapInterchain`, str)
}
return nil
}

View File

@ -0,0 +1,11 @@
package capinterchain
import "github.com/onsonr/sonr/crypto/ucan"
func NewCap(ty CapInterchain) ucan.Capability {
return ucan.Capability(ty)
}
func (c CapInterchain) Contains(b ucan.Capability) bool {
return c.String() == b.String()
}

View File

@ -0,0 +1,49 @@
// Code generated from Pkl module `sonr.orm.UCAN`. DO NOT EDIT.
package capvault
import (
"encoding"
"fmt"
)
type CapVault string
const (
CrudAsset CapVault = "crud/asset"
CrudAuthzgrant CapVault = "crud/authzgrant"
CrudProfile CapVault = "crud/profile"
CrudRecord CapVault = "crud/record"
UseRecovery CapVault = "use/recovery"
UseSync CapVault = "use/sync"
UseSigner CapVault = "use/signer"
)
// String returns the string representation of CapVault
func (rcv CapVault) String() string {
return string(rcv)
}
var _ encoding.BinaryUnmarshaler = new(CapVault)
// UnmarshalBinary implements encoding.BinaryUnmarshaler for CapVault.
func (rcv *CapVault) UnmarshalBinary(data []byte) error {
switch str := string(data); str {
case "crud/asset":
*rcv = CrudAsset
case "crud/authzgrant":
*rcv = CrudAuthzgrant
case "crud/profile":
*rcv = CrudProfile
case "crud/record":
*rcv = CrudRecord
case "use/recovery":
*rcv = UseRecovery
case "use/sync":
*rcv = UseSync
case "use/signer":
*rcv = UseSigner
default:
return fmt.Errorf(`illegal: "%s" is not a valid CapVault`, str)
}
return nil
}

View File

@ -0,0 +1,11 @@
package capvault
import "github.com/onsonr/sonr/crypto/ucan"
func NewCap(ty CapVault) ucan.Capability {
return ucan.Capability(ty)
}
func (c CapVault) Contains(b ucan.Capability) bool {
return c.String() == b.String()
}

View File

@ -0,0 +1,114 @@
// Package attns implements the UCAN resource and capability types
package attns
import (
"github.com/onsonr/sonr/crypto/ucan"
"github.com/onsonr/sonr/crypto/ucan/attns/capaccount"
"github.com/onsonr/sonr/crypto/ucan/attns/capinterchain"
"github.com/onsonr/sonr/crypto/ucan/attns/capvault"
"github.com/onsonr/sonr/crypto/ucan/attns/resaccount"
"github.com/onsonr/sonr/crypto/ucan/attns/resinterchain"
"github.com/onsonr/sonr/crypto/ucan/attns/resvault"
)
// Capability hierarchy for sonr network
// -------------------------------------
// VAULT (DWN)
//
// └─ CRUD/ASSET
// └─ CRUD/AUTHZGRANT
// └─ CRUD/PROFILE
// └─ CRUD/RECORD
// └─ USE/RECOVERY
// └─ USE/SYNC
// └─ USE/SIGNER
//
// ACCOUNT (DID)
//
// └─ EXEC/BROADCAST
// └─ EXEC/QUERY
// └─ EXEC/SIMULATE
// └─ EXEC/VOTE
// └─ EXEC/DELEGATE
// └─ EXEC/INVOKE
// └─ EXEC/SEND
//
// INTERCHAIN
//
// └─ TRANSFER/SWAP
// └─ TRANSFER/SEND
// └─ TRANSFER/ATOMIC
// └─ TRANSFER/BATCH
// └─ TRANSFER/P2P
// └─ TRANSFER/SEND
type Capability string
const (
CapExecBroadcast = capaccount.ExecBroadcast
CapExecQuery = capaccount.ExecQuery
CapExecSimulate = capaccount.ExecSimulate
CapExecVote = capaccount.ExecVote
CapExecDelegate = capaccount.ExecDelegate
CapExecInvoke = capaccount.ExecInvoke
CapExecSend = capaccount.ExecSend
CapTransferSwap = capinterchain.TransferSwap
CapTransferSend = capinterchain.TransferSend
CapTransferAtomic = capinterchain.TransferAtomic
CapTransferBatch = capinterchain.TransferBatch
CapTransferP2P = capinterchain.TransferP2p
CapCrudAsset = capvault.CrudAsset
CapCrudAuthzgrant = capvault.CrudAuthzgrant
CapCrudProfile = capvault.CrudProfile
CapCrudRecord = capvault.CrudRecord
CapUseRecovery = capvault.UseRecovery
CapUseSync = capvault.UseSync
CapUseSigner = capvault.UseSigner
)
type NewCapFunc func(string) ucan.Capability
type BuildResourceFunc func(string, string) ucan.Resource
func CreateArray(attns ...ucan.Attenuation) ucan.Attenuations {
return ucan.Attenuations(attns)
}
func New(cap ucan.Capability, rsc ucan.Resource) ucan.Attenuation {
return ucan.Attenuation{
Cap: cap,
Rsc: rsc,
}
}
// NewAccountCap creates a new account capability
func NewAccountCap(ty capaccount.CapAccount) ucan.Capability {
return capaccount.NewCap(ty)
}
// NewInterchainCap creates a new interchain capability
func NewInterchainCap(ty capinterchain.CapInterchain) ucan.Capability {
return capinterchain.NewCap(ty)
}
// NewVaultCap creates a new vault capability
func NewVaultCap(ty capvault.CapVault) ucan.Capability {
return capvault.NewCap(ty)
}
// BuildAccountResource creates a new account resource
func BuildAccountResource(ty resaccount.ResAccount, value string) ucan.Resource {
return resaccount.Build(ty, value)
}
// BuildInterchainResource creates a new interchain resource
func BuildInterchainResource(ty resinterchain.ResInterchain, value string) ucan.Resource {
return resinterchain.Build(ty, value)
}
// BuildVaultResource creates a new vault resource
func BuildVaultResource(ty resvault.ResVault, value string) ucan.Resource {
return resvault.Build(ty, value)
}
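Putting the new enum packages together, building an attenuation is now a matter of pairing a capability constructor with a resource builder. A minimal sketch, assuming the attns helpers shown above; the vault address value is illustrative:

package main

import (
	"fmt"

	"github.com/onsonr/sonr/crypto/ucan/attns"
	"github.com/onsonr/sonr/crypto/ucan/attns/capvault"
	"github.com/onsonr/sonr/crypto/ucan/attns/resvault"
)

func main() {
	// Grant crud/profile on a vault identified by its Sonr address.
	att := attns.New(
		attns.NewVaultCap(capvault.CrudProfile),
		attns.BuildVaultResource(resvault.AddrSonr, "idx1exampleaddr"),
	)

	atts := attns.CreateArray(att)
	fmt.Println(len(atts), "attenuation:", att.Rsc.Type(), "=", att.Rsc.Value())
}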

View File

@ -1,40 +0,0 @@
// Code generated from Pkl module `sonr.orm.UCAN`. DO NOT EDIT.
package policytype
import (
"encoding"
"fmt"
)
type PolicyType string
const (
POLICYTHRESHOLD PolicyType = "POLICY_THRESHOLD"
POLICYTIMELOCK PolicyType = "POLICY_TIMELOCK"
POLICYWHITELIST PolicyType = "POLICY_WHITELIST"
POLICYKEYGEN PolicyType = "POLICY_KEYGEN"
)
// String returns the string representation of PolicyType
func (rcv PolicyType) String() string {
return string(rcv)
}
var _ encoding.BinaryUnmarshaler = new(PolicyType)
// UnmarshalBinary implements encoding.BinaryUnmarshaler for PolicyType.
func (rcv *PolicyType) UnmarshalBinary(data []byte) error {
switch str := string(data); str {
case "POLICY_THRESHOLD":
*rcv = POLICYTHRESHOLD
case "POLICY_TIMELOCK":
*rcv = POLICYTIMELOCK
case "POLICY_WHITELIST":
*rcv = POLICYWHITELIST
case "POLICY_KEYGEN":
*rcv = POLICYKEYGEN
default:
return fmt.Errorf(`illegal: "%s" is not a valid PolicyType`, str)
}
return nil
}

View File

@ -0,0 +1,43 @@
// Code generated from Pkl module `sonr.orm.UCAN`. DO NOT EDIT.
package resaccount
import (
"encoding"
"fmt"
)
type ResAccount string
const (
AccSequence ResAccount = "acc/sequence"
AccNumber ResAccount = "acc/number"
ChainId ResAccount = "chain/id"
AssetCode ResAccount = "asset/code"
AuthzGrant ResAccount = "authz/grant"
)
// String returns the string representation of ResAccount
func (rcv ResAccount) String() string {
return string(rcv)
}
var _ encoding.BinaryUnmarshaler = new(ResAccount)
// UnmarshalBinary implements encoding.BinaryUnmarshaler for ResAccount.
func (rcv *ResAccount) UnmarshalBinary(data []byte) error {
switch str := string(data); str {
case "acc/sequence":
*rcv = AccSequence
case "acc/number":
*rcv = AccNumber
case "chain/id":
*rcv = ChainId
case "asset/code":
*rcv = AssetCode
case "authz/grant":
*rcv = AuthzGrant
default:
return fmt.Errorf(`illegal: "%s" is not a valid ResAccount`, str)
}
return nil
}

View File

@ -0,0 +1,33 @@
package resaccount
import "github.com/onsonr/sonr/crypto/ucan"
func Build(ty ResAccount, value string) ucan.Resource {
return newStringLengthResource(ty.String(), value)
}
type stringLengthRsc struct {
t string
v string
}
// NewStringLengthResource is a silly implementation of resource to use while
// I figure out what an OR filter on strings is. Don't use this.
func newStringLengthResource(typ, val string) ucan.Resource {
return stringLengthRsc{
t: typ,
v: val,
}
}
func (r stringLengthRsc) Type() string {
return r.t
}
func (r stringLengthRsc) Value() string {
return r.v
}
func (r stringLengthRsc) Contains(b ucan.Resource) bool {
return r.Type() == b.Type() && len(r.Value()) <= len(b.Value())
}

View File

@ -0,0 +1,43 @@
// Code generated from Pkl module `sonr.orm.UCAN`. DO NOT EDIT.
package resinterchain
import (
"encoding"
"fmt"
)
type ResInterchain string
const (
ChannnelPort ResInterchain = "channnel/port"
ChainId ResInterchain = "chain/id"
ChainName ResInterchain = "chain/name"
AccHost ResInterchain = "acc/host"
AccController ResInterchain = "acc/controller"
)
// String returns the string representation of ResInterchain
func (rcv ResInterchain) String() string {
return string(rcv)
}
var _ encoding.BinaryUnmarshaler = new(ResInterchain)
// UnmarshalBinary implements encoding.BinaryUnmarshaler for ResInterchain.
func (rcv *ResInterchain) UnmarshalBinary(data []byte) error {
switch str := string(data); str {
case "channnel/port":
*rcv = ChannnelPort
case "chain/id":
*rcv = ChainId
case "chain/name":
*rcv = ChainName
case "acc/host":
*rcv = AccHost
case "acc/controller":
*rcv = AccController
default:
return fmt.Errorf(`illegal: "%s" is not a valid ResInterchain`, str)
}
return nil
}

View File

@ -0,0 +1,33 @@
package resinterchain
import "github.com/onsonr/sonr/crypto/ucan"
func Build(ty ResInterchain, value string) ucan.Resource {
return newStringLengthResource(ty.String(), value)
}
type stringLengthRsc struct {
t string
v string
}
// NewStringLengthResource is a silly implementation of resource to use while
// I figure out what an OR filter on strings is. Don't use this.
func newStringLengthResource(typ, val string) ucan.Resource {
return stringLengthRsc{
t: typ,
v: val,
}
}
func (r stringLengthRsc) Type() string {
return r.t
}
func (r stringLengthRsc) Value() string {
return r.v
}
func (r stringLengthRsc) Contains(b ucan.Resource) bool {
return r.Type() == b.Type() && len(r.Value()) <= len(b.Value())
}

View File

@ -1,52 +0,0 @@
// Code generated from Pkl module `sonr.orm.UCAN`. DO NOT EDIT.
package resourcetype
import (
"encoding"
"fmt"
)
type ResourceType string
const (
RESACCOUNT ResourceType = "RES_ACCOUNT"
RESTRANSACTION ResourceType = "RES_TRANSACTION"
RESPOLICY ResourceType = "RES_POLICY"
RESRECOVERY ResourceType = "RES_RECOVERY"
RESVAULT ResourceType = "RES_VAULT"
RESIPFS ResourceType = "RES_IPFS"
RESIPNS ResourceType = "RES_IPNS"
RESKEYSHARE ResourceType = "RES_KEYSHARE"
)
// String returns the string representation of ResourceType
func (rcv ResourceType) String() string {
return string(rcv)
}
var _ encoding.BinaryUnmarshaler = new(ResourceType)
// UnmarshalBinary implements encoding.BinaryUnmarshaler for ResourceType.
func (rcv *ResourceType) UnmarshalBinary(data []byte) error {
switch str := string(data); str {
case "RES_ACCOUNT":
*rcv = RESACCOUNT
case "RES_TRANSACTION":
*rcv = RESTRANSACTION
case "RES_POLICY":
*rcv = RESPOLICY
case "RES_RECOVERY":
*rcv = RESRECOVERY
case "RES_VAULT":
*rcv = RESVAULT
case "RES_IPFS":
*rcv = RESIPFS
case "RES_IPNS":
*rcv = RESIPNS
case "RES_KEYSHARE":
*rcv = RESKEYSHARE
default:
return fmt.Errorf(`illegal: "%s" is not a valid ResourceType`, str)
}
return nil
}

View File

@ -0,0 +1,46 @@
// Code generated from Pkl module `sonr.orm.UCAN`. DO NOT EDIT.
package resvault
import (
"encoding"
"fmt"
)
type ResVault string
const (
KsEnclave ResVault = "ks/enclave"
LocCid ResVault = "loc/cid"
LocEntity ResVault = "loc/entity"
LocIpns ResVault = "loc/ipns"
AddrSonr ResVault = "addr/sonr"
ChainCode ResVault = "chain/code"
)
// String returns the string representation of ResVault
func (rcv ResVault) String() string {
return string(rcv)
}
var _ encoding.BinaryUnmarshaler = new(ResVault)
// UnmarshalBinary implements encoding.BinaryUnmarshaler for ResVault.
func (rcv *ResVault) UnmarshalBinary(data []byte) error {
switch str := string(data); str {
case "ks/enclave":
*rcv = KsEnclave
case "loc/cid":
*rcv = LocCid
case "loc/entity":
*rcv = LocEntity
case "loc/ipns":
*rcv = LocIpns
case "addr/sonr":
*rcv = AddrSonr
case "chain/code":
*rcv = ChainCode
default:
return fmt.Errorf(`illegal: "%s" is not a valid ResVault`, str)
}
return nil
}

View File

@ -0,0 +1,33 @@
package resvault
import "github.com/onsonr/sonr/crypto/ucan"
func Build(ty ResVault, value string) ucan.Resource {
return newStringLengthResource(ty.String(), value)
}
type stringLengthRsc struct {
t string
v string
}
// NewStringLengthResource is a silly implementation of resource to use while
// I figure out what an OR filter on strings is. Don't use this.
func newStringLengthResource(typ, val string) ucan.Resource {
return stringLengthRsc{
t: typ,
v: val,
}
}
func (r stringLengthRsc) Type() string {
return r.t
}
func (r stringLengthRsc) Value() string {
return r.v
}
func (r stringLengthRsc) Contains(b ucan.Resource) bool {
return r.Type() == b.Type() && len(r.Value()) <= len(b.Value())
}

View File

@ -1,150 +0,0 @@
package ucan
import (
"fmt"
"github.com/onsonr/sonr/crypto/mpc"
"github.com/onsonr/sonr/crypto/ucan/attns/capability"
"github.com/onsonr/sonr/crypto/ucan/attns/policytype"
"github.com/onsonr/sonr/crypto/ucan/attns/resourcetype"
)
// NewSmartAccount creates default attenuations for a smart account
func NewSmartAccount(
accountAddr string,
) Attenuations {
caps := AccountPermissions.GetCapabilities()
return Attenuations{
// Owner capabilities
{Cap: caps.Cap(CapOwner.String()), Rsc: NewResource(ResAccount, accountAddr)},
// Operation capabilities
{Cap: caps.Cap(capability.CAPEXECUTE.String()), Rsc: NewResource(ResTransaction, fmt.Sprintf("%s:*", accountAddr))},
{Cap: caps.Cap(capability.CAPPROPOSE.String()), Rsc: NewResource(ResTransaction, fmt.Sprintf("%s:*", accountAddr))},
{Cap: caps.Cap(capability.CAPSIGN.String()), Rsc: NewResource(ResTransaction, fmt.Sprintf("%s:*", accountAddr))},
// Policy capabilities
{Cap: caps.Cap(capability.CAPSETPOLICY.String()), Rsc: NewResource(ResPolicy, fmt.Sprintf("%s:*", accountAddr))},
{Cap: caps.Cap(capability.CAPSETTHRESHOLD.String()), Rsc: NewResource(ResPolicy, fmt.Sprintf("%s:threshold", accountAddr))},
}
}
// NewSmartAccountPolicy creates attenuations for policy management
func NewSmartAccountPolicy(
accountAddr string,
policyType policytype.PolicyType,
) Attenuations {
caps := AccountPermissions.GetCapabilities()
return Attenuations{
{
Cap: caps.Cap(capability.CAPSETPOLICY.String()),
Rsc: NewResource(
ResPolicy,
fmt.Sprintf("%s:%s", accountAddr, policyType),
),
},
}
}
// SmartAccountCapabilities defines the capability hierarchy
func SmartAccountCapabilities() []string {
return []string{
CapOwner.String(),
CapOperator.String(),
CapObserver.String(),
CapExecute.String(),
CapPropose.String(),
CapSign.String(),
CapSetPolicy.String(),
CapSetThreshold.String(),
CapRecover.String(),
CapSocial.String(),
}
}
// CreateVaultAttenuations creates default attenuations for a smart account
func NewService(
origin string,
) Attenuations {
caps := ServicePermissions.GetCapabilities()
return Attenuations{
// Owner capabilities
{Cap: caps.Cap(capability.CAPOWNER.String()), Rsc: NewResource(resourcetype.RESACCOUNT, origin)},
// Operation capabilities
{Cap: caps.Cap(capability.CAPEXECUTE.String()), Rsc: NewResource(resourcetype.RESTRANSACTION, fmt.Sprintf("%s:*", origin))},
{Cap: caps.Cap(capability.CAPPROPOSE.String()), Rsc: NewResource(resourcetype.RESTRANSACTION, fmt.Sprintf("%s:*", origin))},
{Cap: caps.Cap(capability.CAPSIGN.String()), Rsc: NewResource(resourcetype.RESTRANSACTION, fmt.Sprintf("%s:*", origin))},
// Policy capabilities
{Cap: caps.Cap(capability.CAPSETPOLICY.String()), Rsc: NewResource(resourcetype.RESPOLICY, fmt.Sprintf("%s:*", origin))},
{Cap: caps.Cap(capability.CAPSETTHRESHOLD.String()), Rsc: NewResource(resourcetype.RESPOLICY, fmt.Sprintf("%s:threshold", origin))},
}
}
// ServiceCapabilities defines the capability hierarchy
func ServiceCapabilities() []string {
return []string{
CapOwner.String(),
CapOperator.String(),
CapObserver.String(),
CapExecute.String(),
CapPropose.String(),
CapSign.String(),
CapResolver.String(),
CapProducer.String(),
}
}
// NewVault creates default attenuations for a smart account
func NewVault(
kss mpc.KeyEnclave,
) Attenuations {
accountAddr := kss.Address()
caps := VaultPermissions.GetCapabilities()
return Attenuations{
// Owner capabilities
{Cap: caps.Cap(capability.CAPOWNER.String()), Rsc: NewResource(resourcetype.RESACCOUNT, accountAddr)},
// Operation capabilities
{Cap: caps.Cap(capability.CAPEXECUTE.String()), Rsc: NewResource(resourcetype.RESTRANSACTION, fmt.Sprintf("%s:*", accountAddr))},
{Cap: caps.Cap(capability.CAPPROPOSE.String()), Rsc: NewResource(resourcetype.RESTRANSACTION, fmt.Sprintf("%s:*", accountAddr))},
{Cap: caps.Cap(capability.CAPSIGN.String()), Rsc: NewResource(resourcetype.RESTRANSACTION, fmt.Sprintf("%s:*", accountAddr))},
// Policy capabilities
{Cap: caps.Cap(capability.CAPSETPOLICY.String()), Rsc: NewResource(resourcetype.RESPOLICY, fmt.Sprintf("%s:*", accountAddr))},
{Cap: caps.Cap(capability.CAPSETTHRESHOLD.String()), Rsc: NewResource(resourcetype.RESPOLICY, fmt.Sprintf("%s:threshold", accountAddr))},
}
}
// NewVaultPolicy creates attenuations for policy management
func NewVaultPolicy(
accountAddr string,
policyType policytype.PolicyType,
) Attenuations {
caps := VaultPermissions.GetCapabilities()
return Attenuations{
{
Cap: caps.Cap(capability.CAPSETPOLICY.String()),
Rsc: NewResource(
resourcetype.RESPOLICY,
fmt.Sprintf("%s:%s", accountAddr, policyType),
),
},
}
}
// VaultCapabilities defines the capability hierarchy
func VaultCapabilities() []string {
return []string{
CapOwner.String(),
CapOperator.String(),
CapObserver.String(),
CapAuthenticate.String(),
CapAuthorize.String(),
CapDelegate.String(),
CapInvoke.String(),
CapExecute.String(),
CapRecover.String(),
}
}

View File

@ -1,27 +1,66 @@
package ucan
import (
-"context"
+"fmt"
)
-// CtxKey defines a distinct type for context keys used by the access
-// package
-type CtxKey string
-// TokenCtxKey is the key for adding an access UCAN to a context.Context
-const TokenCtxKey CtxKey = "UCAN"
-// CtxWithToken adds a UCAN value to a context
-func CtxWithToken(ctx context.Context, t Token) context.Context {
-return context.WithValue(ctx, TokenCtxKey, t)
-}
-// FromCtx extracts a token from a given context if one is set, returning nil
-// otherwise
-func FromCtx(ctx context.Context) *Token {
-iface := ctx.Value(TokenCtxKey)
-if ref, ok := iface.(*Token); ok {
-return ref
-}
-return nil
-}
+var EmptyAttenuation = Attenuation{
+Cap: Capability(nil),
+Rsc: Resource(nil),
+}
+// Permissions represents the type of attenuation
+type Permissions string
+const (
+// AccountPermissions represents the smart account attenuation
+AccountPermissions = Permissions("account")
+// ServicePermissions represents the service attenuation
+ServicePermissions = Permissions("service")
+// VaultPermissions represents the vault attenuation
+VaultPermissions = Permissions("vault")
+)
+// Cap returns the capability for the given AttenuationPreset
+func (a Permissions) NewCap(c string) Capability {
+return a.GetCapabilities().Cap(c)
+}
+// NestedCapabilities returns the nested capabilities for the given AttenuationPreset
+func (a Permissions) GetCapabilities() NestedCapabilities {
+var caps []string
+switch a {
+case AccountPermissions:
+// caps = SmartAccountCapabilities()
+case VaultPermissions:
+// caps = VaultCapabilities()
+}
+return NewNestedCapabilities(caps...)
+}
+// Equals returns true if the given AttenuationPreset is equal to the receiver
+func (a Permissions) Equals(b Permissions) bool {
+return a == b
+}
+// String returns the string representation of the AttenuationPreset
+func (a Permissions) String() string {
+return string(a)
+}
+// ParseAttenuationData parses raw attenuation data into a structured format
+func ParseAttenuationData(data map[string]interface{}) (Permissions, map[string]interface{}, error) {
+typeRaw, ok := data["preset"]
+if !ok {
+return "", nil, fmt.Errorf("missing preset type in attenuation data")
+}
+presetType, ok := typeRaw.(string)
+if !ok {
+return "", nil, fmt.Errorf("invalid preset type format")
+}
+return Permissions(presetType), data, nil
+}
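
Taken on its own, the new `Permissions` surface can be exercised like this (an in-package sketch; the map keys follow `ParseAttenuationData` above, and the `cap` value is illustrative):

```go
package ucan

import "testing"

func TestParseAttenuationDataPreset(t *testing.T) {
	data := map[string]interface{}{
		"preset": "vault", // matches VaultPermissions
		"cap":    "EXECUTE",
	}
	preset, raw, err := ParseAttenuationData(data)
	if err != nil {
		t.Fatal(err)
	}
	if !preset.Equals(VaultPermissions) {
		t.Fatalf("unexpected preset %s", preset)
	}
	_ = raw // the original map is handed back for further decoding
}
```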

View File

@ -1,164 +0,0 @@
package ucan
import (
"fmt"
"github.com/onsonr/sonr/crypto/ucan/attns/capability"
"github.com/onsonr/sonr/crypto/ucan/attns/policytype"
"github.com/onsonr/sonr/crypto/ucan/attns/resourcetype"
)
var EmptyAttenuation = Attenuation{
Cap: Capability(nil),
Rsc: Resource(nil),
}
const (
// Owner
CapOwner = capability.CAPOWNER
CapOperator = capability.CAPOPERATOR
CapObserver = capability.CAPOBSERVER
// Auth
CapAuthenticate = capability.CAPAUTHENTICATE
CapAuthorize = capability.CAPAUTHORIZE
CapDelegate = capability.CAPDELEGATE
CapInvoke = capability.CAPINVOKE
CapExecute = capability.CAPEXECUTE
CapPropose = capability.CAPPROPOSE
CapSign = capability.CAPSIGN
CapSetPolicy = capability.CAPSETPOLICY
CapSetThreshold = capability.CAPSETTHRESHOLD
CapRecover = capability.CAPRECOVER
CapSocial = capability.CAPSOCIAL
CapResolver = capability.CAPRESOLVER
CapProducer = capability.CAPPRODUCER
// Resources
ResAccount = resourcetype.RESACCOUNT
ResTransaction = resourcetype.RESTRANSACTION
ResPolicy = resourcetype.RESPOLICY
ResRecovery = resourcetype.RESRECOVERY
ResVault = resourcetype.RESVAULT
ResIPFS = resourcetype.RESIPFS
ResIPNS = resourcetype.RESIPNS
ResKeyShare = resourcetype.RESKEYSHARE
// PolicyTypes
PolicyThreshold = policytype.POLICYTHRESHOLD
PolicyTimelock = policytype.POLICYTIMELOCK
PolicyWhitelist = policytype.POLICYWHITELIST
PolicyKeyShare = policytype.POLICYKEYGEN
)
// NewVaultResource creates a new resource identifier
func NewResource(resType resourcetype.ResourceType, path string) Resource {
return NewStringLengthResource(string(resType), path)
}
// Permissions represents the type of attenuation
type Permissions string
const (
// AccountPermissions represents the smart account attenuation
AccountPermissions = Permissions("account")
// ServicePermissions represents the service attenuation
ServicePermissions = Permissions("service")
// VaultPermissions represents the vault attenuation
VaultPermissions = Permissions("vault")
)
// Cap returns the capability for the given AttenuationPreset
func (a Permissions) NewCap(c capability.Capability) Capability {
return a.GetCapabilities().Cap(c.String())
}
// NestedCapabilities returns the nested capabilities for the given AttenuationPreset
func (a Permissions) GetCapabilities() NestedCapabilities {
var caps []string
switch a {
case AccountPermissions:
caps = SmartAccountCapabilities()
case VaultPermissions:
caps = VaultCapabilities()
}
return NewNestedCapabilities(caps...)
}
// Equals returns true if the given AttenuationPreset is equal to the receiver
func (a Permissions) Equals(b Permissions) bool {
return a == b
}
// String returns the string representation of the AttenuationPreset
func (a Permissions) String() string {
return string(a)
}
// GetConstructor returns the AttenuationConstructorFunc for a Permission
func (a Permissions) GetConstructor() AttenuationConstructorFunc {
return NewAttenuationFromPreset(a)
}
// NewAttenuationFromPreset creates an AttenuationConstructorFunc for the given preset
func NewAttenuationFromPreset(preset Permissions) AttenuationConstructorFunc {
return func(v map[string]interface{}) (Attenuation, error) {
// Extract capability and resource from map
capStr, ok := v["cap"].(string)
if !ok {
return EmptyAttenuation, fmt.Errorf("missing or invalid capability in attenuation data")
}
resType, ok := v["type"].(string)
if !ok {
return EmptyAttenuation, fmt.Errorf("missing or invalid resource type in attenuation data")
}
path, ok := v["path"].(string)
if !ok {
path = "/" // Default path if not specified
}
// Create capability from preset
cap := preset.NewCap(capability.Capability(capStr))
if cap == nil {
return EmptyAttenuation, fmt.Errorf("invalid capability %s for preset %s", capStr, preset)
}
// Create resource
resource := NewResource(resourcetype.ResourceType(resType), path)
return Attenuation{
Cap: cap,
Rsc: resource,
}, nil
}
}
// GetPresetConstructor returns the appropriate AttenuationConstructorFunc for a given type
func GetPresetConstructor(attType string) (AttenuationConstructorFunc, error) {
preset := Permissions(attType)
switch preset {
case AccountPermissions, ServicePermissions, VaultPermissions:
return NewAttenuationFromPreset(preset), nil
default:
return nil, fmt.Errorf("unknown attenuation preset: %s", attType)
}
}
// ParseAttenuationData parses raw attenuation data into a structured format
func ParseAttenuationData(data map[string]interface{}) (Permissions, map[string]interface{}, error) {
typeRaw, ok := data["preset"]
if !ok {
return "", nil, fmt.Errorf("missing preset type in attenuation data")
}
presetType, ok := typeRaw.(string)
if !ok {
return "", nil, fmt.Errorf("invalid preset type format")
}
return Permissions(presetType), data, nil
}

View File

@ -1,62 +0,0 @@
package ucan
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestAttenuationPresetConstructor(t *testing.T) {
tests := []struct {
name string
data map[string]interface{}
wantErr bool
}{
{
name: "valid smart account attenuation",
data: map[string]interface{}{
"preset": "account",
"cap": string(CapOwner),
"type": string(ResAccount),
"path": "/accounts/123",
},
wantErr: false,
},
{
name: "valid vault attenuation",
data: map[string]interface{}{
"preset": "vault",
"cap": string(CapOperator),
"type": string(ResVault),
"path": "/vaults/456",
},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
preset, data, err := ParseAttenuationData(tt.data)
if tt.wantErr {
assert.Error(t, err)
return
}
assert.NoError(t, err)
constructor, err := GetPresetConstructor(preset.String())
if tt.wantErr {
assert.Error(t, err)
return
}
assert.NoError(t, err)
attenuation, err := constructor(data)
if tt.wantErr {
assert.Error(t, err)
return
}
assert.NoError(t, err)
assert.NotNil(t, attenuation)
})
}
}

View File

@ -0,0 +1,310 @@
---
title: Overview
---
# Overview
:::note Synopsis
Learn about what the Fee Middleware module is, and how to build custom modules that utilize the Fee Middleware functionality
:::
## What is the Fee Middleware module?
IBC does not depend on relayer operators for transaction verification. However, the relayer infrastructure ensures liveness of the Interchain network — operators listen for packets sent through channels opened between chains, and perform the vital service of ferrying these packets (and proof of the transaction on the sending chain/receipt on the receiving chain) to the clients on each side of the channel.
Though relaying is permissionless and completely decentralized and accessible, it does come with operational costs. Running full nodes to query transaction proofs and paying for transaction fees associated with IBC packets are two of the primary cost burdens which have driven the overall discussion on **a general, in-protocol incentivization mechanism for relayers**.
Initially, a [simple proposal](https://github.com/cosmos/ibc/pull/577/files) was created to incentivize relaying on ICS20 token transfers on the destination chain. However, the proposal was specific to ICS20 token transfers and would have to be reimplemented in this format on every other IBC application module.
After much discussion, the proposal was expanded to a [general incentivisation design](https://github.com/cosmos/ibc/tree/master/spec/app/ics-029-fee-payment) that can be adopted by any ICS application protocol as [middleware](../../01-ibc/04-middleware/02-develop.md).
## Concepts
ICS29 fee payments in this middleware design are built on the assumption that sender chains are the source of incentives — the chain on which packets are incentivized is the chain that distributes fees to relayer operators. However, as part of the IBC packet flow, messages have to be submitted on both sender and destination chains. This introduces the requirement of a mapping of relayer operator's addresses on both chains.
To achieve the stated requirements, the **fee middleware module has two main groups of functionality**:
- Registering of relayer addresses associated with each party involved in relaying the packet on the source chain. This registration process can be automated on start up of relayer infrastructure and happens only once, not every packet flow.
This is described in the [Fee distribution section](04-fee-distribution.md).
- Escrowing fees by any party which will be paid out to each rightful party on completion of the packet lifecycle.
This is described in the [Fee messages section](03-msgs.md).
We complete the introduction by giving a list of definitions of relevant terminology.
`Forward relayer`: The relayer that submits the `MsgRecvPacket` message for a given packet (on the destination chain).
`Reverse relayer`: The relayer that submits the `MsgAcknowledgement` message for a given packet (on the source chain).
`Timeout relayer`: The relayer that submits the `MsgTimeout` or `MsgTimeoutOnClose` messages for a given packet (on the source chain).
`Payee`: The account address on the source chain to be paid on completion of the packet lifecycle. The packet lifecycle on the source chain completes with the receipt of a `MsgTimeout`/`MsgTimeoutOnClose` or a `MsgAcknowledgement`.
`Counterparty payee`: The account address to be paid on completion of the packet lifecycle on the destination chain. The packet lifecycle on the destination chain completes with a successful `MsgRecvPacket`.
`Refund address`: The address of the account paying for the incentivization of packet relaying. The account is refunded timeout fees upon successful acknowledgement. In the event of a packet timeout, both acknowledgement and receive fees are refunded.
## Known Limitations
- At the time of the release of the feature (ibc-go v4) fee payments middleware only supported incentivisation of new channels; however, with the release of channel upgradeability (ibc-go v8.1) it is possible to enable incentivisation of all existing channels.
- Even though unlikely, there exists a DoS attack vector on a fee-enabled channel if 1) there exists a relayer software implementation that is incentivised to time out packets if the timeout fee is greater than the sum of the fees to receive and acknowledge the packet, and 2) only this type of implementation is used by operators relaying on the channel. In this situation, an attacker could continuously incentivise the relayers to never deliver the packets by incrementing the timeout fee of the packets above the sum of the receive and acknowledge fees. However, this situation is unlikely to occur because 1) another relayer behaving honestly could relay the packets before they time out, and 2) the attack would be costly because the attacker would need to incentivise the timeout fee of the packets with their own funds. Given the low impact and unlikelihood of the attack, we have decided to accept this risk and not implement any mitigation measures.
## Module Integration
The Fee Middleware module, as the name suggests, plays the role of an IBC middleware and as such must be configured by chain developers to route and handle IBC messages correctly.
For Cosmos SDK chains this setup is done via the `app/app.go` file, where modules are constructed and configured in order to bootstrap the blockchain application.
## Example integration of the Fee Middleware module
```go
// app.go
// Register the AppModule for the fee middleware module
ModuleBasics = module.NewBasicManager(
...
ibcfee.AppModuleBasic{},
...
)
...
// Add module account permissions for the fee middleware module
maccPerms = map[string][]string{
...
ibcfeetypes.ModuleName: nil,
}
...
// Add fee middleware Keeper
type App struct {
...
IBCFeeKeeper ibcfeekeeper.Keeper
...
}
...
// Create store keys
keys := sdk.NewKVStoreKeys(
...
ibcfeetypes.StoreKey,
...
)
...
app.IBCFeeKeeper = ibcfeekeeper.NewKeeper(
appCodec, keys[ibcfeetypes.StoreKey],
app.IBCKeeper.ChannelKeeper, // may be replaced with IBC middleware
app.IBCKeeper.ChannelKeeper,
&app.IBCKeeper.PortKeeper, app.AccountKeeper, app.BankKeeper,
)
// See the section below for configuring an application stack with the fee middleware module
...
// Register fee middleware AppModule
app.moduleManager = module.NewManager(
...
ibcfee.NewAppModule(app.IBCFeeKeeper),
)
...
// Add fee middleware to begin blocker logic
app.moduleManager.SetOrderBeginBlockers(
...
ibcfeetypes.ModuleName,
...
)
// Add fee middleware to end blocker logic
app.moduleManager.SetOrderEndBlockers(
...
ibcfeetypes.ModuleName,
...
)
// Add fee middleware to init genesis logic
app.moduleManager.SetOrderInitGenesis(
...
ibcfeetypes.ModuleName,
...
)
```
## Configuring an application stack with Fee Middleware
As mentioned in [IBC middleware development](../../01-ibc/04-middleware/02-develop.md) an application stack may be composed of many or no middlewares that nest a base application.
These layers form the complete set of application logic that enable developers to build composable and flexible IBC application stacks.
For example, an application stack may be just a single base application like `transfer`, however, the same application stack composed with `29-fee` will nest the `transfer` base application
by wrapping it with the Fee Middleware module.
### Transfer
See below for an example of how to create an application stack using `transfer` and `29-fee`.
The following `transferStack` is configured in `app/app.go` and added to the IBC `Router`.
The in-line comments describe the execution flow of packets between the application stack and IBC core.
```go
// Create Transfer Stack
// SendPacket, since it is originating from the application to core IBC:
// transferKeeper.SendPacket -> fee.SendPacket -> channel.SendPacket
// RecvPacket, message that originates from core IBC and goes down to app, the flow is the other way
// channel.RecvPacket -> fee.OnRecvPacket -> transfer.OnRecvPacket
// transfer stack contains (from top to bottom):
// - IBC Fee Middleware
// - Transfer
// create IBC module from bottom to top of stack
var transferStack porttypes.IBCModule
transferStack = transfer.NewIBCModule(app.TransferKeeper)
transferStack = ibcfee.NewIBCMiddleware(transferStack, app.IBCFeeKeeper)
// Add transfer stack to IBC Router
ibcRouter.AddRoute(ibctransfertypes.ModuleName, transferStack)
```
### Interchain Accounts
See below for an example of how to create an application stack using `27-interchain-accounts` and `29-fee`.
The following `icaControllerStack` and `icaHostStack` are configured in `app/app.go` and added to the IBC `Router` with the associated authentication module.
The in-line comments describe the execution flow of packets between the application stack and IBC core.
```go
// Create Interchain Accounts Stack
// SendPacket, since it is originating from the application to core IBC:
// icaAuthModuleKeeper.SendTx -> icaController.SendPacket -> fee.SendPacket -> channel.SendPacket
// initialize ICA module with mock module as the authentication module on the controller side
var icaControllerStack porttypes.IBCModule
icaControllerStack = ibcmock.NewIBCModule(&mockModule, ibcmock.NewMockIBCApp("", scopedICAMockKeeper))
app.ICAAuthModule = icaControllerStack.(ibcmock.IBCModule)
icaControllerStack = icacontroller.NewIBCMiddleware(icaControllerStack, app.ICAControllerKeeper)
icaControllerStack = ibcfee.NewIBCMiddleware(icaControllerStack, app.IBCFeeKeeper)
// RecvPacket, message that originates from core IBC and goes down to app, the flow is:
// channel.RecvPacket -> fee.OnRecvPacket -> icaHost.OnRecvPacket
var icaHostStack porttypes.IBCModule
icaHostStack = icahost.NewIBCModule(app.ICAHostKeeper)
icaHostStack = ibcfee.NewIBCMiddleware(icaHostStack, app.IBCFeeKeeper)
// Add authentication module, controller and host to IBC router
ibcRouter.
// the ICA Controller middleware needs to be explicitly added to the IBC Router because the
// ICA controller module owns the port capability for ICA. The ICA authentication module
// owns the channel capability.
AddRoute(ibcmock.ModuleName+icacontrollertypes.SubModuleName, icaControllerStack). // ica with mock auth module stack route to ica (top level of middleware stack)
AddRoute(icacontrollertypes.SubModuleName, icaControllerStack).
AddRoute(icahosttypes.SubModuleName, icaHostStack)
```
## Fee Distribution
Packet fees are divided into 3 distinct amounts in order to compensate relayer operators for packet relaying on fee enabled IBC channels.
- `RecvFee`: The sum of all packet receive fees distributed to a payee for successful execution of `MsgRecvPacket`.
- `AckFee`: The sum of all packet acknowledgement fees distributed to a payee for successful execution of `MsgAcknowledgement`.
- `TimeoutFee`: The sum of all packet timeout fees distributed to a payee for successful execution of `MsgTimeout`.
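For illustration, the three components map onto a single fee object escrowed per incentivized packet. The sketch below assumes ibc-go v8 import paths and the ICS-29 `Fee` field names; amounts are purely illustrative:

```go
package main

import (
	"fmt"

	sdk "github.com/cosmos/cosmos-sdk/types"
	ibcfeetypes "github.com/cosmos/ibc-go/v8/modules/apps/29-fee/types"
)

func main() {
	// One fee object per incentivized packet; amounts are purely illustrative.
	fee := ibcfeetypes.Fee{
		RecvFee:    sdk.NewCoins(sdk.NewInt64Coin("stake", 500)),
		AckFee:     sdk.NewCoins(sdk.NewInt64Coin("stake", 100)),
		TimeoutFee: sdk.NewCoins(sdk.NewInt64Coin("stake", 100)),
	}
	total := fee.RecvFee.Add(fee.AckFee...).Add(fee.TimeoutFee...)
	fmt.Println(total) // arithmetic total of the three components
}
```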
## Register a counterparty payee address for forward relaying
As mentioned in [ICS29 Concepts](01-overview.md#concepts), the forward relayer describes the actor who performs the submission of `MsgRecvPacket` on the destination chain.
Fee distribution for incentivized packet relays takes place on the packet source chain.
> Relayer operators are expected to register a counterparty payee address, in order to be compensated accordingly with `RecvFee`s upon completion of a packet lifecycle.
The counterparty payee address registered on the destination chain is encoded into the packet acknowledgement and communicated as such to the source chain for fee distribution.
**If a counterparty payee is not registered for the forward relayer on the destination chain, the escrowed fees will be refunded upon fee distribution.**
### Relayer operator actions
A transaction must be submitted **to the destination chain** including a `CounterpartyPayee` address of an account on the source chain.
The transaction must be signed by the `Relayer`.
Note: If a module account address is used as the `CounterpartyPayee` but the module has been set as a blocked address in the `BankKeeper`, the refunding to the module account will fail. This is because many modules use invariants to compare internal tracking of module account balances against the actual balance of the account stored in the `BankKeeper`. If a token transfer to the module account occurs without going through this module and updating the account balance of the module on the `BankKeeper`, then invariants may break and unknown behaviour could occur depending on the module implementation. Therefore, if it is desirable to use a module account that is currently blocked, the module developers should be consulted to gauge the possibility of removing the module account from the blocked list.
```go
type MsgRegisterCounterpartyPayee struct {
// unique port identifier
PortId string
// unique channel identifier
ChannelId string
// the relayer address
Relayer string
// the counterparty payee address
CounterpartyPayee string
}
```
> This message is expected to fail if:
>
> - `PortId` is invalid (see [24-host naming requirements](https://github.com/cosmos/ibc/blob/master/spec/core/ics-024-host-requirements/README.md#paths-identifiers-separators)).
> - `ChannelId` is invalid (see [24-host naming requirements](https://github.com/cosmos/ibc/blob/master/spec/core/ics-024-host-requirements/README.md#paths-identifiers-separators)).
> - `Relayer` is an invalid address (see [Cosmos SDK Addresses](https://github.com/cosmos/cosmos-sdk/blob/main/docs/learn/beginner/03-accounts.md#addresses)).
> - `CounterpartyPayee` is empty or contains more than 2048 bytes.
See below for an example CLI command:
```bash
simd tx ibc-fee register-counterparty-payee transfer channel-0 \
cosmos1rsp837a4kvtgp2m4uqzdge0zzu6efqgucm0qdh \
osmo1v5y0tz01llxzf4c2afml8s3awue0ymju22wxx2 \
--from cosmos1rsp837a4kvtgp2m4uqzdge0zzu6efqgucm0qdh
```
## Register an alternative payee address for reverse and timeout relaying
As mentioned in [ICS29 Concepts](01-overview.md#concepts), the reverse relayer describes the actor who performs the submission of `MsgAcknowledgement` on the source chain.
Similarly the timeout relayer describes the actor who performs the submission of `MsgTimeout` (or `MsgTimeoutOnClose`) on the source chain.
> Relayer operators **may choose** to register an optional payee address, in order to be compensated accordingly with `AckFee`s and `TimeoutFee`s upon completion of a packet life cycle.
If a payee is not registered for the reverse or timeout relayer on the source chain, then fee distribution assumes the default behaviour, where fees are paid out to the relayer account which delivers `MsgAcknowledgement` or `MsgTimeout`/`MsgTimeoutOnClose`.
### Relayer operator actions
A transaction must be submitted **to the source chain** including a `Payee` address of an account on the source chain.
The transaction must be signed by the `Relayer`.
Note: If a module account address is used as the `Payee` it is recommended to [turn off invariant checks](https://github.com/cosmos/ibc-go/blob/v7.0.0/testing/simapp/app.go#L727) for that module.
```go
type MsgRegisterPayee struct {
// unique port identifier
PortId string
// unique channel identifier
ChannelId string
// the relayer address
Relayer string
// the payee address
Payee string
}
```
> This message is expected to fail if:
>
> - `PortId` is invalid (see [24-host naming requirements](https://github.com/cosmos/ibc/blob/master/spec/core/ics-024-host-requirements/README.md#paths-identifiers-separators)).
> - `ChannelId` is invalid (see [24-host naming requirements](https://github.com/cosmos/ibc/blob/master/spec/core/ics-024-host-requirements/README.md#paths-identifiers-separators)).
> - `Relayer` is an invalid address (see [Cosmos SDK Addresses](https://github.com/cosmos/cosmos-sdk/blob/main/docs/learn/beginner/03-accounts.md#addresses)).
> - `Payee` is an invalid address (see [Cosmos SDK Addresses](https://github.com/cosmos/cosmos-sdk/blob/main/docs/learn/beginner/03-accounts.md#addresses)).
See below for an example CLI command:
```bash
simd tx ibc-fee register-payee transfer channel-0 \
cosmos1rsp837a4kvtgp2m4uqzdge0zzu6efqgucm0qdh \
cosmos153lf4zntqt33a4v0sm5cytrxyqn78q7kz8j8x5 \
--from cosmos1rsp837a4kvtgp2m4uqzdge0zzu6efqgucm0qdh
```

View File

@ -0,0 +1,178 @@
---
title: Overview
---
# Overview
:::note Synopsis
Learn about what the token Transfer module is
:::
## What is the Transfer module?
Transfer is the Cosmos SDK implementation of the [ICS-20](https://github.com/cosmos/ibc/tree/master/spec/app/ics-020-fungible-token-transfer) protocol, which enables cross-chain fungible token transfers.
## Concepts
### Acknowledgements
ICS20 uses the recommended acknowledgement format as specified by [ICS 04](https://github.com/cosmos/ibc/tree/master/spec/core/ics-004-channel-and-packet-semantics#acknowledgement-envelope).
A successful receive of a transfer packet will result in a Result Acknowledgement being written
with the value `[]byte{byte(1)}` in the `Response` field.
An unsuccessful receive of a transfer packet will result in an Error Acknowledgement being written
with the error message in the `Response` field.
### Denomination trace
The denomination trace corresponds to the information that allows a token to be traced back to its
origin chain. It contains a sequence of port and channel identifiers ordered from the most recent to
the oldest in the timeline of transfers.
This information is included on the token's base denomination field in the form of a hash to prevent an
unbounded denomination length. For example, the token `transfer/channelToA/uatom` will be displayed
as `ibc/7F1D3FCF4AE79E1554D670D1AD949A9BA4E4A3C76C63093E17E446A46061A7A2`. The human readable denomination
is stored using `x/bank` module's [denom metadata](https://docs.cosmos.network/main/build/modules/bank#denom-metadata)
feature. You may display the human readable denominations by querying balances with the `--resolve-denom` flag, as in:
```shell
simd query bank balances [address] --resolve-denom
```
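A minimal sketch of how the hashed representation is derived (SHA-256 over the full trace path plus base denomination, hex-encoded, uppercased and prefixed with `ibc/`), mirroring ADR 001:

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// ibcDenom derives the on-chain denom for a traced token: SHA-256 over
// "<trace path>/<base denom>", uppercase hex, prefixed with "ibc/".
func ibcDenom(tracePath, baseDenom string) string {
	sum := sha256.Sum256([]byte(tracePath + "/" + baseDenom))
	return fmt.Sprintf("ibc/%X", sum[:])
}

func main() {
	fmt.Println(ibcDenom("transfer/channelToA", "uatom"))
	// expected: ibc/7F1D3FCF4AE79E1554D670D1AD949A9BA4E4A3C76C63093E17E446A46061A7A2
}
```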
Each send to any chain other than the one it was previously received from is a movement forwards in
the token's timeline. This causes trace to be added to the token's history and the destination port
and destination channel to be prefixed to the denomination. In these instances the sender chain is
acting as the "source zone". When the token is sent back to the chain it previously received from, the
prefix is removed. This is a backwards movement in the token's timeline and the sender chain is
acting as the "sink zone".
It is strongly recommended to read the full details of [ADR 001: Coin Source Tracing](/architecture/adr-001-coin-source-tracing) to understand the implications and context of the IBC token representations.
## UX suggestions for clients
For clients (wallets, exchanges, applications, block explorers, etc) that want to display the source of the token, it is recommended to use the following alternatives for each of the cases below:
### Direct connection
If the denomination trace contains a single identifier prefix pair (as in the example above), then
the easiest way to retrieve the chain and light client identifier is to map the trace information
directly. In summary, this requires querying the channel from the denomination trace identifiers,
and then the counterparty client state using the counterparty port and channel identifiers from the
retrieved channel.
A general pseudo algorithm would look like the following:
1. Query the full denomination trace.
2. Query the channel with the `portID/channelID` pair, which corresponds to the first destination of the
token.
3. Query the client state using the identifiers pair. Note that this query will return a `"Not
Found"` response if the current chain is not connected to this channel.
4. Retrieve the client identifier or chain identifier from the client state (eg: on
Tendermint clients) and store it locally.
Using the gRPC gateway client service the steps above would be, with a given IBC token `ibc/7F1D3FCF4AE79E1554D670D1AD949A9BA4E4A3C76C63093E17E446A46061A7A2` stored on `chainB`:
1. `GET /ibc/apps/transfer/v1/denom_traces/7F1D3FCF4AE79E1554D670D1AD949A9BA4E4A3C76C63093E17E446A46061A7A2` -> `{"path": "transfer/channelToA", "base_denom": "uatom"}`
2. `GET /ibc/apps/transfer/v1/channels/channelToA/ports/transfer/client_state` -> `{"client_id": "clientA", "chain-id": "chainA", ...}`
3. `GET /ibc/apps/transfer/v1/channels/channelToA/ports/transfer` -> `{"channel_id": "channelToA", "port_id": "transfer", "counterparty": {"channel_id": "channelToB", "port_id": "transfer"}, ...}`
4. `GET /ibc/apps/transfer/v1/channels/channelToB/ports/transfer/client_state` -> `{"client_id": "clientB", "chain-id": "chainB", ...}`
Then, the token transfer chain path for the `uatom` denomination would be: `chainA` -> `chainB`.
### Multiple hops
The multiple channel hops case applies when the token has passed through multiple chains between the original source and final destination chains.
The IBC protocol doesn't know the topology of the overall network (i.e. connections between chains and identifier names between them). For this reason, in the multiple hops case, a particular chain in the timeline of the individual transfers can't query the chain and client identifiers of the other chains.
Take for example the following sequence of transfers `A -> B -> C` for an IBC token, with a final prefix path (trace info) of `transfer/channelChainC/transfer/channelChainB`. What the paragraph above means is that even in the case that chain `C` is directly connected to chain `A`, querying the port and channel identifiers that chain `B` uses to connect to chain `A` (eg: `transfer/channelChainA`) can be completely different from the one that chain `C` uses to connect to chain `A` (eg: `transfer/channelToChainA`).
Thus the proposed solution for clients that the IBC team recommends are the following:
- **Connect to all chains**: Connecting to all the chains in the timeline would allow clients to
perform the queries outlined in the [direct connection](#direct-connection) section to each
relevant chain. By repeatedly following the port and channel denomination trace transfer timeline,
clients should always be able to find all the relevant identifiers. This comes at the tradeoff
that the client must connect to nodes on each of the chains in order to perform the queries.
- **Relayer as a Service (RaaS)**: A longer term solution is to use/create a relayer service that
could map the denomination trace to the chain path timeline for each token (i.e. `origin chain ->
chain #1 -> ... -> chain #(n-1) -> final chain`). These services could provide merkle proofs in
order to allow clients to optionally verify the path timeline correctness for themselves by
running light clients. If the proofs are not verified, they should be considered trusted
third-party services. Additionally, clients would be advised in the future to use RaaS providers that support the
largest number of connections between chains in the ecosystem. Unfortunately, none of the existing
public relayers (in [Golang](https://github.com/cosmos/relayer) and
[Rust](https://github.com/informalsystems/ibc-rs)) provide this service to clients.
:::tip
The only viable alternative for clients (at the time of writing) to trace tokens with multiple connection hops is to connect to all chains directly and perform the relevant queries on each of them in sequence.
:::
## Forwarding
:::info
Token forwarding and unwinding is supported only on ICS20 v2 transfer channels.
:::
Forwarding allows tokens to be routed to a final destination through multiple (up to 8) intermediary
chains. With forwarding, it's also possible to unwind IBC vouchers to their native chain, and forward
them afterwards to another destination, all with just a single transfer transaction on the sending chain.
### Forward tokens
Native tokens or IBC vouchers on any chain can be forwarded through intermediary chains to reach their
final destination. For example, given the topology below, with 3 chains and a transfer channel between
chains A and B and between chains B and C:
![Light Mode Forwarding](./images/forwarding-3-chains-light.png#gh-light-mode-only)![Dark Mode Forwarding](./images/forwarding-3-chains-dark.png#gh-dark-mode-only)
Native tokens on chain `A` can be sent to chain `C` through chain `B`. The routing is specified by the
source port ID and channel ID of choice on every intermediary chain. In this example, there is only one
forwarding hop on chain `B` and the port ID, channel ID pair is `transfer`, `channelBToC`. Forwarding of
multi-denom collections of tokens is also allowed (i.e. forwarding of tokens of different denominations).
### Unwind tokens
Taking again as an example the topology from the previous section, we assume that native tokens on chain `A`
have been transferred to chain `C`. The IBC vouchers on chain `C` have the denomination trace
`transfer/channelCtoB/transfer/channelBtoA`, and with forwarding it is possible to submit a transfer message
on chain `C` and automatically unwind the vouchers through chain `B` to chain `A`, so that the tokens recovered
on the origin chain regain their native denomination. In order to execute automatic unwinding, the transfer
module does not require extra user input: the unwind route is encoded in the denomination trace with the
pairs of destination port ID, channel ID that are added on every chain where the tokens are received.
Please note that unwinding of vouchers is only allowed when vouchers transferred all share the same denomination
trace (signifying coins that all originate from the same source). It is not possible to unwind vouchers of two different
IBC denominations, since they come from different source chains.
### Unwind tokens and then forward
Unwinding and forwarding can be used in combination, so that vouchers are first unwound to their origin chain
and then forwarded to a final destination. The same restriction as in the unwinding case applies: only vouchers
of a single IBC denomination can be used.
## Locked funds
In some [exceptional cases](/architecture/adr-026-ibc-client-recovery-mechanisms#exceptional-cases), a client state associated with a given channel cannot be updated. As a result, funds from fungible tokens in that channel are permanently locked and can no longer be transferred.
To mitigate this, a client update governance proposal can be submitted to update the frozen client
with a new valid header. Once the proposal passes the client state will be unfrozen and the funds
from the associated channels will then be unlocked. This mechanism only applies to clients that
allow updates via governance, such as Tendermint clients.
In addition to this, it's important to mention that a token must be sent back along the exact route
that it took originally in order to return it to its original form on the source chain (eg: the
Cosmos Hub for the `uatom`). Sending a token back to the same chain across a different channel will
**not** move the token back across its timeline. If a channel in the chain history closes before the
token can be sent back across that channel, then the token will not be returnable to its original
form.
## Security considerations
For safety, no other module must be capable of minting tokens with the `ibc/` prefix. The IBC
transfer module needs a subset of the denomination space that only it can create tokens in.
## Channel Closure
The IBC transfer module does not support channel closure.

View File

@ -41,14 +41,14 @@ theme:
primary: cyan
accent: cyan
toggle:
-icon: material/toggle-switch
+icon: material/moon-waning-crescent
name: Switch to dark mode
- media: "(prefers-color-scheme: dark)"
scheme: slate
primary: black
accent: cyan
toggle:
-icon: material/toggle-switch-off
+icon: material/sun
name: Switch to system preference
font:
text: Geist

15
go.mod
View File

@ -83,6 +83,7 @@ require (
github.com/multiformats/go-multicodec v0.9.0
github.com/multiformats/go-multihash v0.2.3
github.com/multiformats/go-varint v0.0.7
+github.com/ncruces/go-sqlite3 v0.21.1
github.com/pkg/errors v0.9.1
github.com/segmentio/ksuid v1.0.4
github.com/spf13/cast v1.6.0
@ -94,13 +95,9 @@
github.com/strangelove-ventures/tokenfactory v0.50.0
github.com/stretchr/testify v1.10.0
golang.org/x/crypto v0.31.0
-golang.org/x/exp v0.0.0-20241204233417-43b7b7cde48d
google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9
google.golang.org/grpc v1.67.1
google.golang.org/protobuf v1.35.2
-gorm.io/driver/postgres v1.5.11
-gorm.io/driver/sqlite v1.5.6
-gorm.io/gorm v1.25.12
lukechampine.com/blake3 v1.3.0
)
@ -235,15 +232,9 @@
github.com/ipld/go-codec-dagpb v1.6.0 // indirect
github.com/ipld/go-ipld-prime v0.21.0 // indirect
github.com/ipshipyard/p2p-forge v0.0.2 // indirect
-github.com/jackc/pgpassfile v1.0.0 // indirect
-github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
-github.com/jackc/pgx/v5 v5.7.1 // indirect
-github.com/jackc/puddle/v2 v2.2.2 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
github.com/jbenet/goprocess v0.1.4 // indirect
-github.com/jinzhu/inflection v1.0.0 // indirect
-github.com/jinzhu/now v1.1.5 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/jmhodges/levigo v1.0.0 // indirect
github.com/klauspost/compress v1.17.11 // indirect
@ -270,7 +261,6 @@
github.com/manifoldco/promptui v0.9.0 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
-github.com/mattn/go-sqlite3 v1.14.22 // indirect
github.com/mholt/acmez/v2 v2.0.3 // indirect
github.com/miekg/dns v1.1.62 // indirect
github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643 // indirect
@ -287,6 +277,7 @@
github.com/multiformats/go-multiaddr-dns v0.4.1 // indirect
github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
github.com/multiformats/go-multistream v0.6.0 // indirect
+github.com/ncruces/julianday v1.0.0 // indirect
github.com/oasisprotocol/curve25519-voi v0.0.0-20230904125328-1f23a7beb09a // indirect
github.com/oklog/run v1.1.0 // indirect
github.com/onsi/ginkgo/v2 v2.22.0 // indirect
@ -334,6 +325,7 @@
github.com/subosito/gotenv v1.6.0 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
github.com/tendermint/go-amino v0.16.0 // indirect
+github.com/tetratelabs/wazero v1.8.2 // indirect
github.com/tidwall/btree v1.7.0 // indirect
github.com/ulikunitz/xz v0.5.11 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
@ -362,6 +354,7 @@
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
go4.org v0.0.0-20230225012048-214862532bf5 // indirect
+golang.org/x/exp v0.0.0-20241204233417-43b7b7cde48d // indirect
golang.org/x/mod v0.22.0 // indirect
golang.org/x/net v0.32.0 // indirect
golang.org/x/oauth2 v0.23.0 // indirect

26
go.sum
View File

@ -1766,14 +1766,6 @@ github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbk
github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0=
github.com/iris-contrib/i18n v0.0.0-20171121225848-987a633949d0/go.mod h1:pMCz62A0xJL6I+umB2YTlFRwWXaDFA0jy+5HzGiJjqI=
github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw=
-github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
-github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
-github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
-github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
-github.com/jackc/pgx/v5 v5.7.1 h1:x7SYsPBYDkHDksogeSmZZ5xzThcTgRz++I5E+ePFUcs=
-github.com/jackc/pgx/v5 v5.7.1/go.mod h1:e7O26IywZZ+naJtWWos6i6fvWK+29etgITqrqHLfoZA=
-github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
-github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA=
@ -1786,10 +1778,6 @@ github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267/go.mod h1:h1n
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
github.com/jhump/protoreflect v1.15.3 h1:6SFRuqU45u9hIZPJAoZ8c28T3nK64BNdp9w6jFonzls=
github.com/jhump/protoreflect v1.15.3/go.mod h1:4ORHmSBmlCW8fh3xHmJMGyul1zNqZK4Elxc8qKP+p1k=
-github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
-github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
-github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
-github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
@ -1971,8 +1959,6 @@ github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsO
github.com/mattn/go-sqlite3 v1.14.5/go.mod h1:WVKg1VTActs4Qso6iwGbiFih2UIHo0ENGwNd0Lj+XmI=
github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
-github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
-github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
@ -2073,6 +2059,10 @@ github.com/nats-io/nkeys v0.0.2/go.mod h1:dab7URMsZm6Z/jp9Z5UGa87Uutgc2mVpXLC4B7
github.com/nats-io/nkeys v0.4.4/go.mod h1:XUkxdLPTufzlihbamfzQ7mw/VGx6ObUs+0bN5sNvt64=
github.com/nats-io/nkeys v0.4.5/go.mod h1:XUkxdLPTufzlihbamfzQ7mw/VGx6ObUs+0bN5sNvt64=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
+github.com/ncruces/go-sqlite3 v0.21.1 h1:cbzIOY3jQrXZWVsBfH9TCFj/iqqMIcJ7PLye4AAEwoQ=
+github.com/ncruces/go-sqlite3 v0.21.1/go.mod h1:zxMOaSG5kFYVFK4xQa0pdwIszqxqJ0W0BxBgwdrNjuA=
+github.com/ncruces/julianday v1.0.0 h1:fH0OKwa7NWvniGQtxdJRxAgkBMolni2BjDHaWTxqt7M=
+github.com/ncruces/julianday v1.0.0/go.mod h1:Dusn2KvZrrovOMJuOt0TNXL6tB7U2E8kvza5fFc9G7g=
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
@ -2390,6 +2380,8 @@ github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
github.com/tendermint/go-amino v0.16.0 h1:GyhmgQKvqF82e2oZeuMSp9JTN0N09emoSZlb2lyGa2E=
github.com/tendermint/go-amino v0.16.0/go.mod h1:TQU0M1i/ImAo+tYpZi73AU3V/dKeCoMC9Sphe2ZwGME=
+github.com/tetratelabs/wazero v1.8.2 h1:yIgLR/b2bN31bjxwXHD8a3d+BogigR952csSDdLYEv4=
+github.com/tetratelabs/wazero v1.8.2/go.mod h1:yAI0XTsMBhREkM/YDAK/zNou3GoiAce1P6+rp/wQhjs=
github.com/tidwall/btree v1.7.0 h1:L1fkJH/AuEh5zBnnBbmTwQ5Lt+bRJ5A8EWecslvo9iI=
github.com/tidwall/btree v1.7.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY=
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
@ -3485,12 +3477,6 @@ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gorm.io/driver/postgres v1.5.11 h1:ubBVAfbKEUld/twyKZ0IYn9rSQh448EdelLYk9Mv314=
-gorm.io/driver/postgres v1.5.11/go.mod h1:DX3GReXH+3FPWGrrgffdvCk3DQ1dwDPdmbenSkweRGI=
-gorm.io/driver/sqlite v1.5.6 h1:fO/X46qn5NUEEOZtnjJRWRzZMe8nqJiQ9E+0hi+hKQE=
-gorm.io/driver/sqlite v1.5.6/go.mod h1:U+J8craQU6Fzkcvu8oLeAQmi50TkwPEhHDEjQZXDah4=
-gorm.io/gorm v1.25.12 h1:I0u8i2hWQItBq1WfE0o2+WuL9+8L21K9e2HHSTE/0f8=
-gorm.io/gorm v1.25.12/go.mod h1:xh7N7RHfYlNc5EmcI/El95gXusucDrQnHXe0+CgWcLQ=
gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=

View File

@ -4,7 +4,7 @@ import (
"context" "context"
"github.com/apple/pkl-go/pkl" "github.com/apple/pkl-go/pkl"
hwayconfig "github.com/onsonr/sonr/pkg/config/hway" hwayconfig "github.com/onsonr/sonr/internal/config/hway"
) )
// LoadFromBytes loads the environment from the given bytes // LoadFromBytes loads the environment from the given bytes

View File

@ -1,4 +1,4 @@
-package common
+package context
import (
"encoding/base64"

View File

@ -1,4 +1,4 @@
-package common
+package context
import "github.com/labstack/echo/v4"

25
internal/database/conn.go Normal file
View File

@ -0,0 +1,25 @@
package database
import (
"context"
"database/sql"
_ "github.com/ncruces/go-sqlite3/driver"
_ "github.com/ncruces/go-sqlite3/embed"
config "github.com/onsonr/sonr/internal/config/hway"
"github.com/onsonr/sonr/internal/database/sink"
)
// NewDB initializes and returns a configured database connection
func NewDB(env config.Hway) (*sql.DB, error) {
db, err := sql.Open("sqlite3", ":memory:")
if err != nil {
return nil, err
}
// create tables
if _, err := db.ExecContext(context.Background(), sink.SchemaSQL); err != nil {
return nil, err
}
return db, nil
}
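
A minimal wiring sketch for this connection helper, assuming the sqlc-generated `repository` package shown below lives under `internal/database/repository` and using an arbitrary package name (neither is confirmed by this diff):

```go
package gateway

import (
	config "github.com/onsonr/sonr/internal/config/hway"
	"github.com/onsonr/sonr/internal/database"
	"github.com/onsonr/sonr/internal/database/repository"
)

// wireDB opens the embedded SQLite database and returns the sqlc query layer.
func wireDB(env config.Hway) (*repository.Queries, error) {
	db, err := database.NewDB(env)
	if err != nil {
		return nil, err
	}
	return repository.New(db), nil
}
```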

View File

@ -0,0 +1,31 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.27.0
package repository
import (
"context"
"database/sql"
)
type DBTX interface {
ExecContext(context.Context, string, ...interface{}) (sql.Result, error)
PrepareContext(context.Context, string) (*sql.Stmt, error)
QueryContext(context.Context, string, ...interface{}) (*sql.Rows, error)
QueryRowContext(context.Context, string, ...interface{}) *sql.Row
}
func New(db DBTX) *Queries {
return &Queries{db: db}
}
type Queries struct {
db DBTX
}
func (q *Queries) WithTx(tx *sql.Tx) *Queries {
return &Queries{
db: tx,
}
}
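
WithTx lets several generated queries share one transaction. A sketch of that pattern, assuming the caller holds both the *sql.DB and the Queries value; the helper name and its placement are hypothetical, and InsertProfile is one of the generated queries shown further down.

package gateway // illustrative placement

import (
	"context"
	"database/sql"

	"github.com/onsonr/sonr/internal/database/repository"
)

// createProfileTx is a hypothetical helper: it runs InsertProfile inside a
// transaction via WithTx, so related writes could later be added atomically.
func createProfileTx(ctx context.Context, db *sql.DB, q *repository.Queries, arg repository.InsertProfileParams) (repository.Profile, error) {
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		return repository.Profile{}, err
	}
	defer tx.Rollback() // rollback is a no-op once Commit has succeeded

	qtx := q.WithTx(tx)
	profile, err := qtx.InsertProfile(ctx, arg)
	if err != nil {
		return repository.Profile{}, err
	}
	return profile, tx.Commit()
}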

View File

@ -0,0 +1,99 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.27.0
package repository
import (
"database/sql"
"time"
)
type Account struct {
ID string
CreatedAt time.Time
UpdatedAt time.Time
DeletedAt sql.NullTime
Number int64
Sequence int64
Address string
PublicKey string
ChainID string
Controller string
IsSubsidiary bool
IsValidator bool
IsDelegator bool
IsAccountable bool
}
type Asset struct {
ID string
CreatedAt time.Time
UpdatedAt time.Time
DeletedAt sql.NullTime
Name string
Symbol string
Decimals int64
ChainID string
Channel string
AssetType string
CoingeckoID sql.NullString
}
type Credential struct {
ID int64
CreatedAt time.Time
UpdatedAt time.Time
DeletedAt sql.NullTime
Handle string
CredentialID string
AuthenticatorAttachment string
Origin string
Type string
Transports string
}
type Profile struct {
ID int64
CreatedAt time.Time
UpdatedAt time.Time
DeletedAt sql.NullTime
Address string
Handle string
Origin string
Name string
}
type Session struct {
ID string
CreatedAt time.Time
UpdatedAt time.Time
DeletedAt sql.NullTime
BrowserName string
BrowserVersion string
ClientIpaddr string
Platform string
IsDesktop bool
IsMobile bool
IsTablet bool
IsTv bool
IsBot bool
Challenge string
IsHumanFirst bool
IsHumanLast bool
ProfileID int64
}
type Vault struct {
ID int64
CreatedAt time.Time
UpdatedAt time.Time
DeletedAt sql.NullTime
Handle string
Origin string
Address string
Cid string
Config string
SessionID string
RedirectUri string
}

View File

@ -0,0 +1,581 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.27.0
// source: query.sql
package repository
import (
"context"
)
const checkHandleExists = `-- name: CheckHandleExists :one
SELECT COUNT(*) > 0 as handle_exists FROM profiles
WHERE handle = ?
AND deleted_at IS NULL
`
func (q *Queries) CheckHandleExists(ctx context.Context, handle string) (bool, error) {
row := q.db.QueryRowContext(ctx, checkHandleExists, handle)
var handle_exists bool
err := row.Scan(&handle_exists)
return handle_exists, err
}
const createSession = `-- name: CreateSession :one
INSERT INTO sessions (
id,
browser_name,
browser_version,
client_ipaddr,
platform,
is_desktop,
is_mobile,
is_tablet,
is_tv,
is_bot,
challenge,
is_human_first,
is_human_last,
profile_id
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? )
RETURNING id, created_at, updated_at, deleted_at, browser_name, browser_version, client_ipaddr, platform, is_desktop, is_mobile, is_tablet, is_tv, is_bot, challenge, is_human_first, is_human_last, profile_id
`
type CreateSessionParams struct {
ID string
BrowserName string
BrowserVersion string
ClientIpaddr string
Platform string
IsDesktop bool
IsMobile bool
IsTablet bool
IsTv bool
IsBot bool
Challenge string
IsHumanFirst bool
IsHumanLast bool
ProfileID int64
}
func (q *Queries) CreateSession(ctx context.Context, arg CreateSessionParams) (Session, error) {
row := q.db.QueryRowContext(ctx, createSession,
arg.ID,
arg.BrowserName,
arg.BrowserVersion,
arg.ClientIpaddr,
arg.Platform,
arg.IsDesktop,
arg.IsMobile,
arg.IsTablet,
arg.IsTv,
arg.IsBot,
arg.Challenge,
arg.IsHumanFirst,
arg.IsHumanLast,
arg.ProfileID,
)
var i Session
err := row.Scan(
&i.ID,
&i.CreatedAt,
&i.UpdatedAt,
&i.DeletedAt,
&i.BrowserName,
&i.BrowserVersion,
&i.ClientIpaddr,
&i.Platform,
&i.IsDesktop,
&i.IsMobile,
&i.IsTablet,
&i.IsTv,
&i.IsBot,
&i.Challenge,
&i.IsHumanFirst,
&i.IsHumanLast,
&i.ProfileID,
)
return i, err
}
const getChallengeBySessionID = `-- name: GetChallengeBySessionID :one
SELECT challenge FROM sessions
WHERE id = ? AND deleted_at IS NULL
LIMIT 1
`
func (q *Queries) GetChallengeBySessionID(ctx context.Context, id string) (string, error) {
row := q.db.QueryRowContext(ctx, getChallengeBySessionID, id)
var challenge string
err := row.Scan(&challenge)
return challenge, err
}
const getCredentialByID = `-- name: GetCredentialByID :one
SELECT id, created_at, updated_at, deleted_at, handle, credential_id, authenticator_attachment, origin, type, transports FROM credentials
WHERE credential_id = ?
AND deleted_at IS NULL
LIMIT 1
`
func (q *Queries) GetCredentialByID(ctx context.Context, credentialID string) (Credential, error) {
row := q.db.QueryRowContext(ctx, getCredentialByID, credentialID)
var i Credential
err := row.Scan(
&i.ID,
&i.CreatedAt,
&i.UpdatedAt,
&i.DeletedAt,
&i.Handle,
&i.CredentialID,
&i.AuthenticatorAttachment,
&i.Origin,
&i.Type,
&i.Transports,
)
return i, err
}
const getCredentialsByHandle = `-- name: GetCredentialsByHandle :many
SELECT id, created_at, updated_at, deleted_at, handle, credential_id, authenticator_attachment, origin, type, transports FROM credentials
WHERE handle = ?
AND deleted_at IS NULL
`
func (q *Queries) GetCredentialsByHandle(ctx context.Context, handle string) ([]Credential, error) {
rows, err := q.db.QueryContext(ctx, getCredentialsByHandle, handle)
if err != nil {
return nil, err
}
defer rows.Close()
var items []Credential
for rows.Next() {
var i Credential
if err := rows.Scan(
&i.ID,
&i.CreatedAt,
&i.UpdatedAt,
&i.DeletedAt,
&i.Handle,
&i.CredentialID,
&i.AuthenticatorAttachment,
&i.Origin,
&i.Type,
&i.Transports,
); err != nil {
return nil, err
}
items = append(items, i)
}
if err := rows.Close(); err != nil {
return nil, err
}
if err := rows.Err(); err != nil {
return nil, err
}
return items, nil
}
const getHumanVerificationNumbers = `-- name: GetHumanVerificationNumbers :one
SELECT is_human_first, is_human_last FROM sessions
WHERE id = ? AND deleted_at IS NULL
LIMIT 1
`
type GetHumanVerificationNumbersRow struct {
IsHumanFirst bool
IsHumanLast bool
}
func (q *Queries) GetHumanVerificationNumbers(ctx context.Context, id string) (GetHumanVerificationNumbersRow, error) {
row := q.db.QueryRowContext(ctx, getHumanVerificationNumbers, id)
var i GetHumanVerificationNumbersRow
err := row.Scan(&i.IsHumanFirst, &i.IsHumanLast)
return i, err
}
const getProfileByAddress = `-- name: GetProfileByAddress :one
SELECT id, created_at, updated_at, deleted_at, address, handle, origin, name FROM profiles
WHERE address = ? AND deleted_at IS NULL
LIMIT 1
`
func (q *Queries) GetProfileByAddress(ctx context.Context, address string) (Profile, error) {
row := q.db.QueryRowContext(ctx, getProfileByAddress, address)
var i Profile
err := row.Scan(
&i.ID,
&i.CreatedAt,
&i.UpdatedAt,
&i.DeletedAt,
&i.Address,
&i.Handle,
&i.Origin,
&i.Name,
)
return i, err
}
const getProfileByHandle = `-- name: GetProfileByHandle :one
SELECT id, created_at, updated_at, deleted_at, address, handle, origin, name FROM profiles
WHERE handle = ?
AND deleted_at IS NULL
LIMIT 1
`
func (q *Queries) GetProfileByHandle(ctx context.Context, handle string) (Profile, error) {
row := q.db.QueryRowContext(ctx, getProfileByHandle, handle)
var i Profile
err := row.Scan(
&i.ID,
&i.CreatedAt,
&i.UpdatedAt,
&i.DeletedAt,
&i.Address,
&i.Handle,
&i.Origin,
&i.Name,
)
return i, err
}
const getProfileByID = `-- name: GetProfileByID :one
SELECT id, created_at, updated_at, deleted_at, address, handle, origin, name FROM profiles
WHERE id = ? AND deleted_at IS NULL
LIMIT 1
`
func (q *Queries) GetProfileByID(ctx context.Context, id int64) (Profile, error) {
row := q.db.QueryRowContext(ctx, getProfileByID, id)
var i Profile
err := row.Scan(
&i.ID,
&i.CreatedAt,
&i.UpdatedAt,
&i.DeletedAt,
&i.Address,
&i.Handle,
&i.Origin,
&i.Name,
)
return i, err
}
const getSessionByClientIP = `-- name: GetSessionByClientIP :one
SELECT id, created_at, updated_at, deleted_at, browser_name, browser_version, client_ipaddr, platform, is_desktop, is_mobile, is_tablet, is_tv, is_bot, challenge, is_human_first, is_human_last, profile_id FROM sessions
WHERE client_ipaddr = ? AND deleted_at IS NULL
LIMIT 1
`
func (q *Queries) GetSessionByClientIP(ctx context.Context, clientIpaddr string) (Session, error) {
row := q.db.QueryRowContext(ctx, getSessionByClientIP, clientIpaddr)
var i Session
err := row.Scan(
&i.ID,
&i.CreatedAt,
&i.UpdatedAt,
&i.DeletedAt,
&i.BrowserName,
&i.BrowserVersion,
&i.ClientIpaddr,
&i.Platform,
&i.IsDesktop,
&i.IsMobile,
&i.IsTablet,
&i.IsTv,
&i.IsBot,
&i.Challenge,
&i.IsHumanFirst,
&i.IsHumanLast,
&i.ProfileID,
)
return i, err
}
const getSessionByID = `-- name: GetSessionByID :one
SELECT id, created_at, updated_at, deleted_at, browser_name, browser_version, client_ipaddr, platform, is_desktop, is_mobile, is_tablet, is_tv, is_bot, challenge, is_human_first, is_human_last, profile_id FROM sessions
WHERE id = ? AND deleted_at IS NULL
LIMIT 1
`
func (q *Queries) GetSessionByID(ctx context.Context, id string) (Session, error) {
row := q.db.QueryRowContext(ctx, getSessionByID, id)
var i Session
err := row.Scan(
&i.ID,
&i.CreatedAt,
&i.UpdatedAt,
&i.DeletedAt,
&i.BrowserName,
&i.BrowserVersion,
&i.ClientIpaddr,
&i.Platform,
&i.IsDesktop,
&i.IsMobile,
&i.IsTablet,
&i.IsTv,
&i.IsBot,
&i.Challenge,
&i.IsHumanFirst,
&i.IsHumanLast,
&i.ProfileID,
)
return i, err
}
const getVaultConfigByCID = `-- name: GetVaultConfigByCID :one
SELECT id, created_at, updated_at, deleted_at, handle, origin, address, cid, config, session_id, redirect_uri FROM vaults
WHERE cid = ?
AND deleted_at IS NULL
LIMIT 1
`
func (q *Queries) GetVaultConfigByCID(ctx context.Context, cid string) (Vault, error) {
row := q.db.QueryRowContext(ctx, getVaultConfigByCID, cid)
var i Vault
err := row.Scan(
&i.ID,
&i.CreatedAt,
&i.UpdatedAt,
&i.DeletedAt,
&i.Handle,
&i.Origin,
&i.Address,
&i.Cid,
&i.Config,
&i.SessionID,
&i.RedirectUri,
)
return i, err
}
const getVaultRedirectURIBySessionID = `-- name: GetVaultRedirectURIBySessionID :one
SELECT redirect_uri FROM vaults
WHERE session_id = ?
AND deleted_at IS NULL
LIMIT 1
`
func (q *Queries) GetVaultRedirectURIBySessionID(ctx context.Context, sessionID string) (string, error) {
row := q.db.QueryRowContext(ctx, getVaultRedirectURIBySessionID, sessionID)
var redirect_uri string
err := row.Scan(&redirect_uri)
return redirect_uri, err
}
const insertCredential = `-- name: InsertCredential :one
INSERT INTO credentials (
handle,
credential_id,
origin,
type,
transports
) VALUES (?, ?, ?, ?, ?)
RETURNING id, created_at, updated_at, deleted_at, handle, credential_id, authenticator_attachment, origin, type, transports
`
type InsertCredentialParams struct {
Handle string
CredentialID string
Origin string
Type string
Transports string
}
func (q *Queries) InsertCredential(ctx context.Context, arg InsertCredentialParams) (Credential, error) {
row := q.db.QueryRowContext(ctx, insertCredential,
arg.Handle,
arg.CredentialID,
arg.Origin,
arg.Type,
arg.Transports,
)
var i Credential
err := row.Scan(
&i.ID,
&i.CreatedAt,
&i.UpdatedAt,
&i.DeletedAt,
&i.Handle,
&i.CredentialID,
&i.AuthenticatorAttachment,
&i.Origin,
&i.Type,
&i.Transports,
)
return i, err
}
const insertProfile = `-- name: InsertProfile :one
INSERT INTO profiles (
address,
handle,
origin,
name
) VALUES (?, ?, ?, ?)
RETURNING id, created_at, updated_at, deleted_at, address, handle, origin, name
`
type InsertProfileParams struct {
Address string
Handle string
Origin string
Name string
}
func (q *Queries) InsertProfile(ctx context.Context, arg InsertProfileParams) (Profile, error) {
row := q.db.QueryRowContext(ctx, insertProfile,
arg.Address,
arg.Handle,
arg.Origin,
arg.Name,
)
var i Profile
err := row.Scan(
&i.ID,
&i.CreatedAt,
&i.UpdatedAt,
&i.DeletedAt,
&i.Address,
&i.Handle,
&i.Origin,
&i.Name,
)
return i, err
}
const softDeleteCredential = `-- name: SoftDeleteCredential :exec
UPDATE credentials
SET deleted_at = CURRENT_TIMESTAMP
WHERE credential_id = ?
`
func (q *Queries) SoftDeleteCredential(ctx context.Context, credentialID string) error {
_, err := q.db.ExecContext(ctx, softDeleteCredential, credentialID)
return err
}
const softDeleteProfile = `-- name: SoftDeleteProfile :exec
UPDATE profiles
SET deleted_at = CURRENT_TIMESTAMP
WHERE address = ?
`
func (q *Queries) SoftDeleteProfile(ctx context.Context, address string) error {
_, err := q.db.ExecContext(ctx, softDeleteProfile, address)
return err
}
const updateProfile = `-- name: UpdateProfile :one
UPDATE profiles
SET
name = ?,
handle = ?,
updated_at = CURRENT_TIMESTAMP
WHERE address = ?
AND deleted_at IS NULL
RETURNING id, created_at, updated_at, deleted_at, address, handle, origin, name
`
type UpdateProfileParams struct {
Name string
Handle string
Address string
}
func (q *Queries) UpdateProfile(ctx context.Context, arg UpdateProfileParams) (Profile, error) {
row := q.db.QueryRowContext(ctx, updateProfile, arg.Name, arg.Handle, arg.Address)
var i Profile
err := row.Scan(
&i.ID,
&i.CreatedAt,
&i.UpdatedAt,
&i.DeletedAt,
&i.Address,
&i.Handle,
&i.Origin,
&i.Name,
)
return i, err
}
const updateSessionHumanVerification = `-- name: UpdateSessionHumanVerification :one
UPDATE sessions
SET
is_human_first = ?,
is_human_last = ?,
updated_at = CURRENT_TIMESTAMP
WHERE id = ?
RETURNING id, created_at, updated_at, deleted_at, browser_name, browser_version, client_ipaddr, platform, is_desktop, is_mobile, is_tablet, is_tv, is_bot, challenge, is_human_first, is_human_last, profile_id
`
type UpdateSessionHumanVerificationParams struct {
IsHumanFirst bool
IsHumanLast bool
ID string
}
func (q *Queries) UpdateSessionHumanVerification(ctx context.Context, arg UpdateSessionHumanVerificationParams) (Session, error) {
row := q.db.QueryRowContext(ctx, updateSessionHumanVerification, arg.IsHumanFirst, arg.IsHumanLast, arg.ID)
var i Session
err := row.Scan(
&i.ID,
&i.CreatedAt,
&i.UpdatedAt,
&i.DeletedAt,
&i.BrowserName,
&i.BrowserVersion,
&i.ClientIpaddr,
&i.Platform,
&i.IsDesktop,
&i.IsMobile,
&i.IsTablet,
&i.IsTv,
&i.IsBot,
&i.Challenge,
&i.IsHumanFirst,
&i.IsHumanLast,
&i.ProfileID,
)
return i, err
}
const updateSessionWithProfileID = `-- name: UpdateSessionWithProfileID :one
UPDATE sessions
SET
profile_id = ?,
updated_at = CURRENT_TIMESTAMP
WHERE id = ?
RETURNING id, created_at, updated_at, deleted_at, browser_name, browser_version, client_ipaddr, platform, is_desktop, is_mobile, is_tablet, is_tv, is_bot, challenge, is_human_first, is_human_last, profile_id
`
type UpdateSessionWithProfileIDParams struct {
ProfileID int64
ID string
}
func (q *Queries) UpdateSessionWithProfileID(ctx context.Context, arg UpdateSessionWithProfileIDParams) (Session, error) {
row := q.db.QueryRowContext(ctx, updateSessionWithProfileID, arg.ProfileID, arg.ID)
var i Session
err := row.Scan(
&i.ID,
&i.CreatedAt,
&i.UpdatedAt,
&i.DeletedAt,
&i.BrowserName,
&i.BrowserVersion,
&i.ClientIpaddr,
&i.Platform,
&i.IsDesktop,
&i.IsMobile,
&i.IsTablet,
&i.IsTv,
&i.IsBot,
&i.Challenge,
&i.IsHumanFirst,
&i.IsHumanLast,
&i.ProfileID,
)
return i, err
}
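
As a usage sketch: the handle-validation path can now be backed by the generated CheckHandleExists query instead of a GORM count. The helper below is illustrative, assuming a *repository.Queries value is available to the handler layer.

package handlers // illustrative placement

import (
	"context"

	"github.com/onsonr/sonr/internal/database/repository"
)

// handleIsAvailable is a hypothetical wrapper around the generated query;
// it reports true when the requested handle is not yet taken.
func handleIsAvailable(ctx context.Context, q *repository.Queries, handle string) (bool, error) {
	exists, err := q.CheckHandleExists(ctx, handle)
	if err != nil {
		return false, err
	}
	return !exists, nil
}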

View File

@ -0,0 +1,58 @@
package database
import (
ctx "github.com/onsonr/sonr/internal/context"
"github.com/go-webauthn/webauthn/protocol"
"github.com/labstack/echo/v4"
"github.com/medama-io/go-useragent"
"github.com/onsonr/sonr/internal/database/repository"
"github.com/segmentio/ksuid"
)
func BaseSessionCreateParams(e echo.Context) repository.CreateSessionParams {
// f := rand.Intn(5) + 1
// l := rand.Intn(4) + 1
challenge, _ := protocol.CreateChallenge()
id := getOrCreateSessionID(e)
ua := useragent.NewParser()
s := ua.Parse(e.Request().UserAgent())
return repository.CreateSessionParams{
ID: id,
BrowserName: s.GetBrowser(),
BrowserVersion: s.GetMajorVersion(),
ClientIpaddr: e.RealIP(),
Platform: s.GetOS(),
IsMobile: s.IsMobile(),
IsTablet: s.IsTablet(),
IsDesktop: s.IsDesktop(),
IsBot: s.IsBot(),
IsTv: s.IsTV(),
// IsHumanFirst: int64(f),
// IsHumanLast: int64(l),
Challenge: challenge.String(),
}
}
func getOrCreateSessionID(c echo.Context) string {
if ok := ctx.CookieExists(c, ctx.SessionID); !ok {
sessionID := ksuid.New().String()
ctx.WriteCookie(c, ctx.SessionID, sessionID)
return sessionID
}
sessionID, err := ctx.ReadCookie(c, ctx.SessionID)
if err != nil {
sessionID = ksuid.New().String()
ctx.WriteCookie(c, ctx.SessionID, sessionID)
}
return sessionID
}
func boolToInt64(b bool) int64 {
if b {
return 1
}
return 0
}
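
BaseSessionCreateParams and getOrCreateSessionID are the building blocks for cookie-backed sessions. A sketch of the assumed middleware wiring (the real middleware lives elsewhere in this change set, and the function name here is hypothetical):

package database

import (
	"github.com/labstack/echo/v4"

	"github.com/onsonr/sonr/internal/database/repository"
)

// SessionMiddleware is an illustrative middleware: it looks the session up by
// the cookie-backed ID and creates it on first contact before calling next.
// A real implementation would distinguish sql.ErrNoRows from other errors.
func SessionMiddleware(q *repository.Queries) echo.MiddlewareFunc {
	return func(next echo.HandlerFunc) echo.HandlerFunc {
		return func(c echo.Context) error {
			params := BaseSessionCreateParams(c)
			reqCtx := c.Request().Context()
			if _, err := q.GetSessionByID(reqCtx, params.ID); err != nil {
				if _, err := q.CreateSession(reqCtx, params); err != nil {
					return err
				}
			}
			return next(c)
		}
	}
}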

View File

@ -0,0 +1,8 @@
package sink
import (
_ "embed"
)
//go:embed schema.sql
var SchemaSQL string

View File

@ -0,0 +1,138 @@
-- name: InsertCredential :one
INSERT INTO credentials (
handle,
credential_id,
origin,
type,
transports
) VALUES (?, ?, ?, ?, ?)
RETURNING *;
-- name: InsertProfile :one
INSERT INTO profiles (
address,
handle,
origin,
name
) VALUES (?, ?, ?, ?)
RETURNING *;
-- name: GetProfileByID :one
SELECT * FROM profiles
WHERE id = ? AND deleted_at IS NULL
LIMIT 1;
-- name: GetProfileByAddress :one
SELECT * FROM profiles
WHERE address = ? AND deleted_at IS NULL
LIMIT 1;
-- name: GetChallengeBySessionID :one
SELECT challenge FROM sessions
WHERE id = ? AND deleted_at IS NULL
LIMIT 1;
-- name: GetHumanVerificationNumbers :one
SELECT is_human_first, is_human_last FROM sessions
WHERE id = ? AND deleted_at IS NULL
LIMIT 1;
-- name: GetSessionByID :one
SELECT * FROM sessions
WHERE id = ? AND deleted_at IS NULL
LIMIT 1;
-- name: GetSessionByClientIP :one
SELECT * FROM sessions
WHERE client_ipaddr = ? AND deleted_at IS NULL
LIMIT 1;
-- name: UpdateSessionHumanVerification :one
UPDATE sessions
SET
is_human_first = ?,
is_human_last = ?,
updated_at = CURRENT_TIMESTAMP
WHERE id = ?
RETURNING *;
-- name: UpdateSessionWithProfileID :one
UPDATE sessions
SET
profile_id = ?,
updated_at = CURRENT_TIMESTAMP
WHERE id = ?
RETURNING *;
-- name: CheckHandleExists :one
SELECT COUNT(*) > 0 as handle_exists FROM profiles
WHERE handle = ?
AND deleted_at IS NULL;
-- name: GetCredentialsByHandle :many
SELECT * FROM credentials
WHERE handle = ?
AND deleted_at IS NULL;
-- name: GetCredentialByID :one
SELECT * FROM credentials
WHERE credential_id = ?
AND deleted_at IS NULL
LIMIT 1;
-- name: SoftDeleteCredential :exec
UPDATE credentials
SET deleted_at = CURRENT_TIMESTAMP
WHERE credential_id = ?;
-- name: SoftDeleteProfile :exec
UPDATE profiles
SET deleted_at = CURRENT_TIMESTAMP
WHERE address = ?;
-- name: UpdateProfile :one
UPDATE profiles
SET
name = ?,
handle = ?,
updated_at = CURRENT_TIMESTAMP
WHERE address = ?
AND deleted_at IS NULL
RETURNING *;
-- name: GetProfileByHandle :one
SELECT * FROM profiles
WHERE handle = ?
AND deleted_at IS NULL
LIMIT 1;
-- name: GetVaultConfigByCID :one
SELECT * FROM vaults
WHERE cid = ?
AND deleted_at IS NULL
LIMIT 1;
-- name: GetVaultRedirectURIBySessionID :one
SELECT redirect_uri FROM vaults
WHERE session_id = ?
AND deleted_at IS NULL
LIMIT 1;
-- name: CreateSession :one
INSERT INTO sessions (
id,
browser_name,
browser_version,
client_ipaddr,
platform,
is_desktop,
is_mobile,
is_tablet,
is_tv,
is_bot,
challenge,
is_human_first,
is_human_last,
profile_id
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? )
RETURNING *;

View File

@ -0,0 +1,121 @@
-- Profiles represent user identities
CREATE TABLE profiles (
id TEXT PRIMARY KEY,
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMP,
address TEXT NOT NULL,
handle TEXT NOT NULL UNIQUE,
origin TEXT NOT NULL,
name TEXT NOT NULL,
UNIQUE(address, origin)
);
-- Accounts represent blockchain accounts
CREATE TABLE accounts (
id TEXT PRIMARY KEY,
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMP,
number INTEGER NOT NULL,
sequence INTEGER NOT NULL DEFAULT 0,
address TEXT NOT NULL UNIQUE,
public_key TEXT NOT NULL CHECK(json_valid(public_key)),
chain_id TEXT NOT NULL,
controller TEXT NOT NULL,
is_subsidiary BOOLEAN NOT NULL DEFAULT FALSE CHECK(is_subsidiary IN (0,1)),
is_validator BOOLEAN NOT NULL DEFAULT FALSE CHECK(is_validator IN (0,1)),
is_delegator BOOLEAN NOT NULL DEFAULT FALSE CHECK(is_delegator IN (0,1)),
is_accountable BOOLEAN NOT NULL DEFAULT TRUE CHECK(is_accountable IN (0,1))
);
-- Assets represent tokens and coins
CREATE TABLE assets (
id TEXT PRIMARY KEY,
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMP,
name TEXT NOT NULL,
symbol TEXT NOT NULL,
decimals INTEGER NOT NULL CHECK(decimals >= 0),
chain_id TEXT NOT NULL,
channel TEXT NOT NULL,
asset_type TEXT NOT NULL,
coingecko_id TEXT,
UNIQUE(chain_id, symbol)
);
-- Credentials store WebAuthn credentials
CREATE TABLE credentials (
id TEXT PRIMARY KEY,
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMP,
handle TEXT NOT NULL,
credential_id TEXT NOT NULL UNIQUE,
authenticator_attachment TEXT NOT NULL,
origin TEXT NOT NULL,
type TEXT NOT NULL,
transports TEXT NOT NULL
);
-- Sessions track user authentication state
CREATE TABLE sessions (
id TEXT PRIMARY KEY,
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMP,
browser_name TEXT NOT NULL,
browser_version TEXT NOT NULL,
client_ipaddr TEXT NOT NULL,
platform TEXT NOT NULL,
is_desktop BOOLEAN NOT NULL DEFAULT FALSE CHECK(is_desktop IN (0,1)),
is_mobile BOOLEAN NOT NULL DEFAULT FALSE CHECK(is_mobile IN (0,1)),
is_tablet BOOLEAN NOT NULL DEFAULT FALSE CHECK(is_tablet IN (0,1)),
is_tv BOOLEAN NOT NULL DEFAULT FALSE CHECK(is_tv IN (0,1)),
is_bot BOOLEAN NOT NULL DEFAULT FALSE CHECK(is_bot IN (0,1)),
challenge TEXT NOT NULL,
is_human_first BOOLEAN NOT NULL DEFAULT FALSE CHECK(is_human_first IN (0,1)),
is_human_last BOOLEAN NOT NULL DEFAULT FALSE CHECK(is_human_last IN (0,1)),
profile_id INTEGER NOT NULL
);
-- Vaults store encrypted data
CREATE TABLE vaults (
id TEXT PRIMARY KEY,
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMP,
handle TEXT NOT NULL,
origin TEXT NOT NULL,
address TEXT NOT NULL,
cid TEXT NOT NULL UNIQUE,
config TEXT NOT NULL CHECK(json_valid(config)),
session_id TEXT NOT NULL,
redirect_uri TEXT NOT NULL
);
-- Indexes for common queries
CREATE INDEX idx_profiles_handle ON profiles(handle);
CREATE INDEX idx_profiles_address ON profiles(address);
CREATE INDEX idx_profiles_deleted_at ON profiles(deleted_at);
CREATE INDEX idx_accounts_address ON accounts(address);
CREATE INDEX idx_accounts_chain_id ON accounts(chain_id);
CREATE INDEX idx_accounts_deleted_at ON accounts(deleted_at);
CREATE INDEX idx_assets_symbol ON assets(symbol);
CREATE INDEX idx_assets_chain_id ON assets(chain_id);
CREATE INDEX idx_assets_deleted_at ON assets(deleted_at);
CREATE INDEX idx_credentials_handle ON credentials(handle);
CREATE INDEX idx_credentials_origin ON credentials(origin);
CREATE INDEX idx_credentials_deleted_at ON credentials(deleted_at);
CREATE INDEX idx_sessions_profile_id ON sessions(profile_id);
CREATE INDEX idx_sessions_client_ipaddr ON sessions(client_ipaddr);
CREATE INDEX idx_sessions_deleted_at ON sessions(deleted_at);
CREATE INDEX idx_vaults_handle ON vaults(handle);
CREATE INDEX idx_vaults_session_id ON vaults(session_id);
CREATE INDEX idx_vaults_deleted_at ON vaults(deleted_at);

View File

@ -0,0 +1,9 @@
version: "2"
sql:
- engine: "sqlite"
queries: "./sink/query.sql"
schema: "./sink/schema.sql"
gen:
go:
package: "repository"
out: "repository"

View File

@ -1,50 +0,0 @@
package context
import (
"github.com/onsonr/sonr/internal/gateway/models"
"github.com/onsonr/sonr/pkg/common"
"github.com/segmentio/ksuid"
)
// initSession initializes or loads an existing session
func (s *HTTPContext) initSession() error {
sessionID := s.getOrCreateSessionID()
// Try to load existing session
var sess models.Session
result := s.db.Where("id = ?", sessionID).First(&sess)
if result.Error != nil {
// Create new session if not found
sess = models.Session{
ID: sessionID,
BrowserName: s.GetBrowser(),
BrowserVersion: s.GetMajorVersion(),
Platform: s.GetOS(),
IsMobile: s.IsMobile(),
IsTablet: s.IsTablet(),
IsDesktop: s.IsDesktop(),
IsBot: s.IsBot(),
IsTV: s.IsTV(),
}
if err := s.db.Create(&sess).Error; err != nil {
return err
}
}
s.sess = &sess
return nil
}
func (s *HTTPContext) getOrCreateSessionID() string {
if ok := common.CookieExists(s.Context, common.SessionID); !ok {
sessionID := ksuid.New().String()
common.WriteCookie(s.Context, common.SessionID, sessionID)
return sessionID
}
sessionID, err := common.ReadCookie(s.Context, common.SessionID)
if err != nil {
sessionID = ksuid.New().String()
common.WriteCookie(s.Context, common.SessionID, sessionID)
}
return sessionID
}

View File

@ -1,63 +0,0 @@
package context
import (
"net/http"
"github.com/labstack/echo/v4"
"github.com/medama-io/go-useragent"
"github.com/onsonr/sonr/internal/gateway/models"
"github.com/onsonr/sonr/internal/gateway/services"
config "github.com/onsonr/sonr/pkg/config/hway"
"gorm.io/gorm"
)
// Middleware creates a new session middleware
func Middleware(db *gorm.DB, env config.Hway) echo.MiddlewareFunc {
ua := useragent.NewParser()
return func(next echo.HandlerFunc) echo.HandlerFunc {
return func(c echo.Context) error {
agent := ua.Parse(c.Request().UserAgent())
cc := NewHTTPContext(c, db, agent, env.GetSonrGrpcUrl())
if err := cc.initSession(); err != nil {
return err
}
return next(cc)
}
}
}
// HTTPContext is the context for HTTP endpoints.
type HTTPContext struct {
echo.Context
*services.ResolverService
db *gorm.DB
sess *models.Session
user *models.User
env config.Hway
useragent.UserAgent
}
// Get returns the HTTPContext from the echo context
func Get(c echo.Context) (*HTTPContext, error) {
ctx, ok := c.(*HTTPContext)
if !ok {
return nil, echo.NewHTTPError(http.StatusInternalServerError, "Session Context not found")
}
return ctx, nil
}
// NewHTTPContext creates a new session context
func NewHTTPContext(c echo.Context, db *gorm.DB, a useragent.UserAgent, grpcAddr string) *HTTPContext {
rsv := services.NewResolverService(grpcAddr)
return &HTTPContext{
Context: c,
db: db,
ResolverService: rsv,
UserAgent: a,
}
}
// Session returns the current session
func (s *HTTPContext) Session() *models.Session {
return s.sess
}

View File

@ -1,58 +0,0 @@
package context
import (
"fmt"
"github.com/labstack/echo/v4"
"github.com/onsonr/sonr/internal/gateway/models"
)
func InsertCredential(c echo.Context, handle string, cred *models.CredentialDescriptor) error {
sess, err := Get(c)
if err != nil {
return err
}
return sess.db.Save(cred.ToDBModel(handle, c.Request().Host)).Error
}
func InsertProfile(c echo.Context) error {
sess, err := Get(c)
if err != nil {
return err
}
handle := c.FormValue("handle")
firstName := c.FormValue("first_name")
lastName := c.FormValue("last_name")
return sess.db.Save(&models.User{
Handle: handle,
Name: fmt.Sprintf("%s %s", firstName, lastName),
}).Error
}
// ╭───────────────────────────────────────────────────────╮
// │ DB Getter Functions │
// ╰───────────────────────────────────────────────────────╯
// SessionID returns the session ID
func SessionID(c echo.Context) (string, error) {
sess, err := Get(c)
if err != nil {
return "", err
}
return sess.Session().ID, nil
}
// HandleExists checks if a handle already exists in any session
func HandleExists(c echo.Context, handle string) (bool, error) {
sess, err := Get(c)
if err != nil {
return false, err
}
var count int64
if err := sess.db.Model(&models.User{}).Where("handle = ?", handle).Count(&count).Error; err != nil {
return false, err
}
return count > 0, nil
}

View File

@ -1,21 +0,0 @@
package handlers
import (
"github.com/labstack/echo/v4"
"github.com/onsonr/sonr/internal/gateway/context"
"github.com/onsonr/sonr/internal/gateway/views"
"github.com/onsonr/sonr/pkg/common/response"
)
func RenderIndex(c echo.Context) error {
return response.TemplEcho(c, views.InitialView(isUnavailableDevice(c)))
}
// isUnavailableDevice returns true if the device is unavailable
func isUnavailableDevice(c echo.Context) bool {
s, err := context.Get(c)
if err != nil {
return true
}
return s.IsBot() || s.IsTV()
}

View File

@ -1,54 +0,0 @@
package handlers
import (
"fmt"
"net/http"
"github.com/go-webauthn/webauthn/protocol"
"github.com/labstack/echo/v4"
"github.com/onsonr/sonr/crypto/mpc"
"github.com/onsonr/sonr/internal/gateway/models"
"github.com/onsonr/sonr/internal/gateway/views"
"github.com/onsonr/sonr/pkg/common/response"
"golang.org/x/exp/rand"
)
func RenderProfileCreate(c echo.Context) error {
d := models.CreateProfileData{
FirstNumber: rand.Intn(5) + 1,
LastNumber: rand.Intn(4) + 1,
}
return response.TemplEcho(c, views.CreateProfileForm(d))
}
func RenderPasskeyCreate(c echo.Context) error {
challenge, _ := protocol.CreateChallenge()
handle := c.FormValue("handle")
firstName := c.FormValue("first_name")
lastName := c.FormValue("last_name")
ks, err := mpc.GenEnclave()
if err != nil {
return echo.NewHTTPError(http.StatusInternalServerError, err.Error())
}
dat := models.CreatePasskeyData{
Address: ks.Address(),
Handle: handle,
Name: fmt.Sprintf("%s %s", firstName, lastName),
Challenge: challenge.String(),
CreationBlock: "00001",
}
return response.TemplEcho(c, views.CreatePasskeyForm(dat))
}
func RenderVaultLoading(c echo.Context) error {
credentialJSON := c.FormValue("credential")
if credentialJSON == "" {
return echo.NewHTTPError(http.StatusBadRequest, "missing credential data")
}
_, err := models.ExtractCredentialDescriptor(credentialJSON)
if err != nil {
return err
}
return response.TemplEcho(c, views.LoadingVaultView())
}

View File

@ -1,11 +0,0 @@
package handlers
import (
"github.com/labstack/echo/v4"
)
// ValidateCredentialSubmit finds the user credential and validates it against the
// session challenge
func ValidateCredentialSubmit(c echo.Context) error {
return nil
}

View File

@ -1,8 +0,0 @@
package handlers
import "github.com/labstack/echo/v4"
// ValidateProfileSubmit finds the chosen handle and verifies it is unique
func ValidateProfileSubmit(c echo.Context) error {
return nil
}

View File

@ -1,37 +0,0 @@
package models
import (
"gorm.io/gorm"
)
type Credential struct {
gorm.Model
Handle string `json:"handle"`
ID string `json:"id"`
Origin string `json:"origin"`
Type string `json:"type"`
Transports string `json:"transports"`
}
type Session struct {
gorm.Model
ID string `json:"id" gorm:"primaryKey"`
BrowserName string `json:"browserName"`
BrowserVersion string `json:"browserVersion"`
Platform string `json:"platform"`
IsDesktop bool `json:"isDesktop"`
IsMobile bool `json:"isMobile"`
IsTablet bool `json:"isTablet"`
IsTV bool `json:"isTV"`
IsBot bool `json:"isBot"`
Challenge string `json:"challenge"`
}
type User struct {
gorm.Model
Address string `json:"address"`
Handle string `json:"handle"`
Origin string `json:"origin"`
Name string `json:"name"`
CID string `json:"cid"`
}

View File

@ -1,15 +0,0 @@
package models
type CreatePasskeyData struct {
Address string
Handle string
Name string
Challenge string
CreationBlock string
}
type CreateProfileData struct {
TurnstileSiteKey string
FirstNumber int
LastNumber int
}

View File

@ -1 +0,0 @@
package models

View File

@ -1,64 +0,0 @@
package models
import (
"encoding/json"
"fmt"
)
// Define the credential structure matching our frontend data
type CredentialDescriptor struct {
ID string `json:"id"`
RawID string `json:"rawId"`
Type string `json:"type"`
AuthenticatorAttachment string `json:"authenticatorAttachment"`
Transports string `json:"transports"`
ClientExtensionResults map[string]string `json:"clientExtensionResults"`
Response struct {
AttestationObject string `json:"attestationObject"`
ClientDataJSON string `json:"clientDataJSON"`
} `json:"response"`
}
func (c *CredentialDescriptor) ToDBModel(handle, origin string) *Credential {
return &Credential{
Handle: handle,
Origin: origin,
ID: c.ID,
Type: c.Type,
Transports: c.Transports,
}
}
func ExtractCredentialDescriptor(jsonString string) (*CredentialDescriptor, error) {
cred := &CredentialDescriptor{}
// Unmarshal the credential JSON
if err := json.Unmarshal([]byte(jsonString), cred); err != nil {
return nil, err
}
// Validate required fields
if cred.ID == "" || cred.RawID == "" {
return nil, fmt.Errorf("missing credential ID")
}
if cred.Type != "public-key" {
return nil, fmt.Errorf("invalid credential type")
}
if cred.Response.AttestationObject == "" || cred.Response.ClientDataJSON == "" {
return nil, fmt.Errorf("missing attestation data")
}
// Log detailed credential information
fmt.Printf("Credential Details:\n"+
"ID: %s\n"+
"Raw ID: %s\n"+
"Type: %s\n"+
"Authenticator Attachment: %s\n"+
"Transports: %v\n"+
cred.ID,
cred.RawID,
cred.Type,
cred.AuthenticatorAttachment,
cred.Transports,
)
return cred, nil
}

View File

@ -1,81 +0,0 @@
// Package gateway provides the default routes for the Sonr hway.
package gateway
import (
"os"
"path/filepath"
"strings"
"github.com/labstack/echo/v4"
"github.com/onsonr/sonr/internal/gateway/context"
"github.com/onsonr/sonr/internal/gateway/handlers"
"github.com/onsonr/sonr/internal/gateway/models"
"github.com/onsonr/sonr/pkg/common/response"
config "github.com/onsonr/sonr/pkg/config/hway"
"gorm.io/driver/postgres"
"gorm.io/driver/sqlite"
"gorm.io/gorm"
)
func RegisterRoutes(e *echo.Echo, env config.Hway, db *gorm.DB) error {
// Custom error handler for gateway
e.HTTPErrorHandler = response.RedirectOnError("http://localhost:3000")
// Inject session middleware with database connection
e.Use(context.Middleware(db, env))
// Register View Handlers
e.GET("/", handlers.RenderIndex)
e.GET("/register", handlers.RenderProfileCreate)
e.POST("/register/passkey", handlers.RenderPasskeyCreate)
e.POST("/register/loading", handlers.RenderVaultLoading)
// Register Validation Handlers
e.PUT("/register/profile/submit", handlers.ValidateProfileSubmit)
e.PUT("/register/passkey/submit", handlers.ValidateCredentialSubmit)
return nil
}
// NewDB initializes and returns a configured database connection
func NewDB(env config.Hway) (*gorm.DB, error) {
// Try PostgreSQL first if DSN is provided
if dsn := env.GetPsqlDSN(); dsn != "" && !strings.Contains(dsn, "password= ") {
db, err := gorm.Open(postgres.Open(dsn), &gorm.Config{})
if err == nil {
// Test the connection
sqlDB, err := db.DB()
if err == nil {
if err = sqlDB.Ping(); err == nil {
// Successfully connected to PostgreSQL
db.AutoMigrate(&models.Credential{})
db.AutoMigrate(&models.Session{})
db.AutoMigrate(&models.User{})
return db, nil
}
}
}
}
// Fall back to SQLite
path := formatDBPath(env.GetSqliteFile())
db, err := gorm.Open(sqlite.Open(path), &gorm.Config{})
if err != nil {
return nil, err
}
// Migrate the schema
db.AutoMigrate(&models.Credential{})
db.AutoMigrate(&models.Session{})
db.AutoMigrate(&models.User{})
return db, nil
}
func formatDBPath(fileName string) string {
configDir := filepath.Join(os.Getenv("XDG_CONFIG_HOME"), "hway")
if err := os.MkdirAll(configDir, 0o755); err != nil {
// If we can't create the directory, fall back to current directory
return fileName
}
return filepath.Join(configDir, fileName)
}

View File

@ -1,59 +0,0 @@
package services
import (
bankv1beta1 "cosmossdk.io/api/cosmos/bank/v1beta1"
didv1 "github.com/onsonr/sonr/api/did/v1"
dwnv1 "github.com/onsonr/sonr/api/dwn/v1"
svcv1 "github.com/onsonr/sonr/api/svc/v1"
"google.golang.org/grpc"
)
type ResolverService struct {
grpcAddr string
}
func NewResolverService(grpcAddr string) *ResolverService {
return &ResolverService{
grpcAddr: grpcAddr,
}
}
func (s *ResolverService) getClientConn() (*grpc.ClientConn, error) {
grpcConn, err := grpc.NewClient(s.grpcAddr, grpc.WithInsecure())
if err != nil {
return nil, err
}
return grpcConn, nil
}
func (s *ResolverService) BankQuery() (bankv1beta1.QueryClient, error) {
conn, err := s.getClientConn()
if err != nil {
return nil, err
}
return bankv1beta1.NewQueryClient(conn), nil
}
func (s *ResolverService) DIDQuery() (didv1.QueryClient, error) {
conn, err := s.getClientConn()
if err != nil {
return nil, err
}
return didv1.NewQueryClient(conn), nil
}
func (s *ResolverService) DWNQuery() (dwnv1.QueryClient, error) {
conn, err := s.getClientConn()
if err != nil {
return nil, err
}
return dwnv1.NewQueryClient(conn), nil
}
func (s *ResolverService) SVCQuery() (svcv1.QueryClient, error) {
conn, err := s.getClientConn()
if err != nil {
return nil, err
}
return svcv1.NewQueryClient(conn), nil
}

View File

@ -1,7 +0,0 @@
package services
import "gorm.io/gorm"
type UserService struct {
db *gorm.DB
}

View File

@ -1,18 +0,0 @@
package services
import (
"github.com/onsonr/sonr/pkg/ipfsapi"
"gorm.io/gorm"
)
type VaultService struct {
db *gorm.DB
tokenStore ipfsapi.IPFSTokenStore
}
func NewVaultService(db *gorm.DB, ipc ipfsapi.Client) *VaultService {
return &VaultService{
db: db,
tokenStore: ipfsapi.NewUCANStore(ipc),
}
}

View File

@ -1,69 +0,0 @@
package views
import (
"github.com/onsonr/sonr/internal/gateway/models"
"github.com/onsonr/sonr/internal/nebula/card"
"github.com/onsonr/sonr/internal/nebula/form"
"github.com/onsonr/sonr/internal/nebula/hero"
"github.com/onsonr/sonr/internal/nebula/input"
"github.com/onsonr/sonr/internal/nebula/layout"
)
templ CreateProfileForm(data models.CreateProfileData) {
@layout.View("New Profile | Sonr.ID") {
@layout.Container() {
@hero.TitleDesc("Basic Info", "Tell us a little about yourself.")
@formCreateProfile(data)
}
}
}
templ CreatePasskeyForm(data models.CreatePasskeyData) {
@layout.View("Register | Sonr.ID") {
@layout.Container() {
@hero.TitleDesc("Link a PassKey", "This will be used to login to your vault.")
@formCreatePasskey(data)
}
}
}
templ LoadingVaultView() {
@layout.View("Loading... | Sonr.ID") {
@layout.Container() {
@hero.TitleDesc("Loading Vault", "This will be used to login to your vault.")
}
}
}
templ formCreatePasskey(data models.CreatePasskeyData) {
@form.Root("/register/finish", "POST", "passkey-form") {
<input type="hidden" name="credential" id="credential-data" required/>
@form.Body() {
@form.Header() {
@card.SonrProfile(data.Address, data.Name, data.Handle, data.CreationBlock)
}
@input.CoinSelect()
@form.Footer() {
@input.Passkey(data.Address, data.Handle, data.Challenge)
@form.CancelButton()
}
}
}
}
templ formCreateProfile(data models.CreateProfileData) {
@form.Root("/register/passkey", "POST", "create-profile") {
@form.Body() {
@form.Header() {
<sl-progress-bar value="50"></sl-progress-bar>
}
@input.Name()
@input.Handle()
@input.HumanSlider(data.FirstNumber, data.LastNumber)
@form.Footer() {
@form.CancelButton()
@form.SubmitButton("Next")
}
}
}
}

View File

@ -1,501 +0,0 @@
// Code generated by templ - DO NOT EDIT.
// templ: version: v0.2.793
package views
//lint:file-ignore SA4006 This context is only used if a nested component is present.
import "github.com/a-h/templ"
import templruntime "github.com/a-h/templ/runtime"
import (
"github.com/onsonr/sonr/internal/gateway/models"
"github.com/onsonr/sonr/internal/nebula/card"
"github.com/onsonr/sonr/internal/nebula/form"
"github.com/onsonr/sonr/internal/nebula/hero"
"github.com/onsonr/sonr/internal/nebula/input"
"github.com/onsonr/sonr/internal/nebula/layout"
)
func CreateProfileForm(data models.CreateProfileData) templ.Component {
return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil {
return templ_7745c5c3_CtxErr
}
templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
if !templ_7745c5c3_IsBuffer {
defer func() {
templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
if templ_7745c5c3_Err == nil {
templ_7745c5c3_Err = templ_7745c5c3_BufErr
}
}()
}
ctx = templ.InitializeContext(ctx)
templ_7745c5c3_Var1 := templ.GetChildren(ctx)
if templ_7745c5c3_Var1 == nil {
templ_7745c5c3_Var1 = templ.NopComponent
}
ctx = templ.ClearChildren(ctx)
templ_7745c5c3_Var2 := templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
if !templ_7745c5c3_IsBuffer {
defer func() {
templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
if templ_7745c5c3_Err == nil {
templ_7745c5c3_Err = templ_7745c5c3_BufErr
}
}()
}
ctx = templ.InitializeContext(ctx)
templ_7745c5c3_Var3 := templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
if !templ_7745c5c3_IsBuffer {
defer func() {
templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
if templ_7745c5c3_Err == nil {
templ_7745c5c3_Err = templ_7745c5c3_BufErr
}
}()
}
ctx = templ.InitializeContext(ctx)
templ_7745c5c3_Err = hero.TitleDesc("Basic Info", "Tell us a little about yourself.").Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(" ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = formCreateProfile(data).Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
return templ_7745c5c3_Err
})
templ_7745c5c3_Err = layout.Container().Render(templ.WithChildren(ctx, templ_7745c5c3_Var3), templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
return templ_7745c5c3_Err
})
templ_7745c5c3_Err = layout.View("New Profile | Sonr.ID").Render(templ.WithChildren(ctx, templ_7745c5c3_Var2), templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
return templ_7745c5c3_Err
})
}
func CreatePasskeyForm(data models.CreatePasskeyData) templ.Component {
return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil {
return templ_7745c5c3_CtxErr
}
templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
if !templ_7745c5c3_IsBuffer {
defer func() {
templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
if templ_7745c5c3_Err == nil {
templ_7745c5c3_Err = templ_7745c5c3_BufErr
}
}()
}
ctx = templ.InitializeContext(ctx)
templ_7745c5c3_Var4 := templ.GetChildren(ctx)
if templ_7745c5c3_Var4 == nil {
templ_7745c5c3_Var4 = templ.NopComponent
}
ctx = templ.ClearChildren(ctx)
templ_7745c5c3_Var5 := templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
if !templ_7745c5c3_IsBuffer {
defer func() {
templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
if templ_7745c5c3_Err == nil {
templ_7745c5c3_Err = templ_7745c5c3_BufErr
}
}()
}
ctx = templ.InitializeContext(ctx)
templ_7745c5c3_Var6 := templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
if !templ_7745c5c3_IsBuffer {
defer func() {
templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
if templ_7745c5c3_Err == nil {
templ_7745c5c3_Err = templ_7745c5c3_BufErr
}
}()
}
ctx = templ.InitializeContext(ctx)
templ_7745c5c3_Err = hero.TitleDesc("Link a PassKey", "This will be used to login to your vault.").Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(" ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = formCreatePasskey(data).Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
return templ_7745c5c3_Err
})
templ_7745c5c3_Err = layout.Container().Render(templ.WithChildren(ctx, templ_7745c5c3_Var6), templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
return templ_7745c5c3_Err
})
templ_7745c5c3_Err = layout.View("Register | Sonr.ID").Render(templ.WithChildren(ctx, templ_7745c5c3_Var5), templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
return templ_7745c5c3_Err
})
}
func LoadingVaultView() templ.Component {
return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil {
return templ_7745c5c3_CtxErr
}
templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
if !templ_7745c5c3_IsBuffer {
defer func() {
templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
if templ_7745c5c3_Err == nil {
templ_7745c5c3_Err = templ_7745c5c3_BufErr
}
}()
}
ctx = templ.InitializeContext(ctx)
templ_7745c5c3_Var7 := templ.GetChildren(ctx)
if templ_7745c5c3_Var7 == nil {
templ_7745c5c3_Var7 = templ.NopComponent
}
ctx = templ.ClearChildren(ctx)
templ_7745c5c3_Var8 := templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
if !templ_7745c5c3_IsBuffer {
defer func() {
templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
if templ_7745c5c3_Err == nil {
templ_7745c5c3_Err = templ_7745c5c3_BufErr
}
}()
}
ctx = templ.InitializeContext(ctx)
templ_7745c5c3_Var9 := templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
if !templ_7745c5c3_IsBuffer {
defer func() {
templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
if templ_7745c5c3_Err == nil {
templ_7745c5c3_Err = templ_7745c5c3_BufErr
}
}()
}
ctx = templ.InitializeContext(ctx)
templ_7745c5c3_Err = hero.TitleDesc("Loading Vault", "This will be used to login to your vault.").Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
return templ_7745c5c3_Err
})
templ_7745c5c3_Err = layout.Container().Render(templ.WithChildren(ctx, templ_7745c5c3_Var9), templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
return templ_7745c5c3_Err
})
templ_7745c5c3_Err = layout.View("Loading... | Sonr.ID").Render(templ.WithChildren(ctx, templ_7745c5c3_Var8), templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
return templ_7745c5c3_Err
})
}
func formCreatePasskey(data models.CreatePasskeyData) templ.Component {
return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil {
return templ_7745c5c3_CtxErr
}
templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
if !templ_7745c5c3_IsBuffer {
defer func() {
templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
if templ_7745c5c3_Err == nil {
templ_7745c5c3_Err = templ_7745c5c3_BufErr
}
}()
}
ctx = templ.InitializeContext(ctx)
templ_7745c5c3_Var10 := templ.GetChildren(ctx)
if templ_7745c5c3_Var10 == nil {
templ_7745c5c3_Var10 = templ.NopComponent
}
ctx = templ.ClearChildren(ctx)
templ_7745c5c3_Var11 := templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
if !templ_7745c5c3_IsBuffer {
defer func() {
templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
if templ_7745c5c3_Err == nil {
templ_7745c5c3_Err = templ_7745c5c3_BufErr
}
}()
}
ctx = templ.InitializeContext(ctx)
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("<input type=\"hidden\" name=\"credential\" id=\"credential-data\" required>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Var12 := templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
if !templ_7745c5c3_IsBuffer {
defer func() {
templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
if templ_7745c5c3_Err == nil {
templ_7745c5c3_Err = templ_7745c5c3_BufErr
}
}()
}
ctx = templ.InitializeContext(ctx)
templ_7745c5c3_Var13 := templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
if !templ_7745c5c3_IsBuffer {
defer func() {
templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
if templ_7745c5c3_Err == nil {
templ_7745c5c3_Err = templ_7745c5c3_BufErr
}
}()
}
ctx = templ.InitializeContext(ctx)
templ_7745c5c3_Err = card.SonrProfile(data.Address, data.Name, data.Handle, data.CreationBlock).Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
return templ_7745c5c3_Err
})
templ_7745c5c3_Err = form.Header().Render(templ.WithChildren(ctx, templ_7745c5c3_Var13), templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(" ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = input.CoinSelect().Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(" ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Var14 := templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
if !templ_7745c5c3_IsBuffer {
defer func() {
templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
if templ_7745c5c3_Err == nil {
templ_7745c5c3_Err = templ_7745c5c3_BufErr
}
}()
}
ctx = templ.InitializeContext(ctx)
templ_7745c5c3_Err = input.Passkey(data.Address, data.Handle, data.Challenge).Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(" ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = form.CancelButton().Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
return templ_7745c5c3_Err
})
templ_7745c5c3_Err = form.Footer().Render(templ.WithChildren(ctx, templ_7745c5c3_Var14), templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
return templ_7745c5c3_Err
})
templ_7745c5c3_Err = form.Body().Render(templ.WithChildren(ctx, templ_7745c5c3_Var12), templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
return templ_7745c5c3_Err
})
templ_7745c5c3_Err = form.Root("/register/finish", "POST", "passkey-form").Render(templ.WithChildren(ctx, templ_7745c5c3_Var11), templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
return templ_7745c5c3_Err
})
}
func formCreateProfile(data models.CreateProfileData) templ.Component {
return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil {
return templ_7745c5c3_CtxErr
}
templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
if !templ_7745c5c3_IsBuffer {
defer func() {
templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
if templ_7745c5c3_Err == nil {
templ_7745c5c3_Err = templ_7745c5c3_BufErr
}
}()
}
ctx = templ.InitializeContext(ctx)
templ_7745c5c3_Var15 := templ.GetChildren(ctx)
if templ_7745c5c3_Var15 == nil {
templ_7745c5c3_Var15 = templ.NopComponent
}
ctx = templ.ClearChildren(ctx)
templ_7745c5c3_Var16 := templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
if !templ_7745c5c3_IsBuffer {
defer func() {
templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
if templ_7745c5c3_Err == nil {
templ_7745c5c3_Err = templ_7745c5c3_BufErr
}
}()
}
ctx = templ.InitializeContext(ctx)
templ_7745c5c3_Var17 := templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
if !templ_7745c5c3_IsBuffer {
defer func() {
templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
if templ_7745c5c3_Err == nil {
templ_7745c5c3_Err = templ_7745c5c3_BufErr
}
}()
}
ctx = templ.InitializeContext(ctx)
templ_7745c5c3_Var18 := templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
if !templ_7745c5c3_IsBuffer {
defer func() {
templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
if templ_7745c5c3_Err == nil {
templ_7745c5c3_Err = templ_7745c5c3_BufErr
}
}()
}
ctx = templ.InitializeContext(ctx)
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("<sl-progress-bar value=\"50\"></sl-progress-bar>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
return templ_7745c5c3_Err
})
templ_7745c5c3_Err = form.Header().Render(templ.WithChildren(ctx, templ_7745c5c3_Var18), templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(" ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = input.Name().Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(" ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = input.Handle().Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(" ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = input.HumanSlider(data.FirstNumber, data.LastNumber).Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(" ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Var19 := templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
if !templ_7745c5c3_IsBuffer {
defer func() {
templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
if templ_7745c5c3_Err == nil {
templ_7745c5c3_Err = templ_7745c5c3_BufErr
}
}()
}
ctx = templ.InitializeContext(ctx)
templ_7745c5c3_Err = form.CancelButton().Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(" ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = form.SubmitButton("Next").Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
return templ_7745c5c3_Err
})
templ_7745c5c3_Err = form.Footer().Render(templ.WithChildren(ctx, templ_7745c5c3_Var19), templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
return templ_7745c5c3_Err
})
templ_7745c5c3_Err = form.Body().Render(templ.WithChildren(ctx, templ_7745c5c3_Var17), templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
return templ_7745c5c3_Err
})
templ_7745c5c3_Err = form.Root("/register/passkey", "POST", "create-profile").Render(templ.WithChildren(ctx, templ_7745c5c3_Var16), templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
return templ_7745c5c3_Err
})
}
var _ = templruntime.GeneratedTemplate

View File

@ -2,11 +2,11 @@
package models
import (
-"github.com/onsonr/sonr/pkg/common/models/keyalgorithm"
-"github.com/onsonr/sonr/pkg/common/models/keycurve"
-"github.com/onsonr/sonr/pkg/common/models/keyencoding"
-"github.com/onsonr/sonr/pkg/common/models/keyrole"
-"github.com/onsonr/sonr/pkg/common/models/keytype"
+"github.com/onsonr/sonr/internal/models/keyalgorithm"
+"github.com/onsonr/sonr/internal/models/keycurve"
+"github.com/onsonr/sonr/internal/models/keyencoding"
+"github.com/onsonr/sonr/internal/models/keyrole"
+"github.com/onsonr/sonr/internal/models/keytype"
)
type DID struct {

Some files were not shown because too many files have changed in this diff.