feature/1214 session fetch refactor (#1215)

* chore(docs): remove token economy guide

* refactor(context): update GatewayContext to use Querier interface

* chore(database): update schema path

* docs: Update READMEs for x/did, x/dwn, and x/svc with UCAN integration

* chore(pkg): update database scope name

* refactor(did): optimize GenesisState proto methods

* refactor(svc): update Service proto to use repeated fields

* refactor(api): rename MsgSpawn to MsgInitialize
Authored by Prad Nukala on 2024-12-24 10:38:17 -05:00; committed by GitHub.
commit 0ec2f7d86a (parent 398864fc6b)
Signature: no known key found for this signature in database (GPG key ID: B5690EEEBB952194)
68 changed files with 9551 additions and 9260 deletions
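For callers, the MsgSpawn → MsgInitialize rename (and the matching Spawn → Initialize RPC) is mechanical. A minimal sketch of the updated client call against the regenerated dwn.v1 package — the endpoint and authority are placeholders, and on a live chain a Msg is normally broadcast inside a signed transaction rather than invoked directly:

```go
package main

import (
	"context"
	"log"

	dwnv1 "github.com/onsonr/sonr/api/dwn/v1" // go_package recorded in the regenerated descriptor
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Placeholder endpoint; real Msg calls are wrapped in signed txs.
	conn, err := grpc.NewClient("localhost:9090",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := dwnv1.NewMsgClient(conn)

	// Before this commit: client.Spawn(ctx, &dwnv1.MsgSpawn{...})
	if _, err := client.Initialize(context.Background(), &dwnv1.MsgInitialize{
		Authority: "sonr1...", // placeholder cosmos.AddressString
		Params:    &dwnv1.Params{},
	}); err != nil {
		log.Fatal(err)
	}
}
```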


@@ -22,25 +22,3 @@ jobs:
with:
input: proto
buf_token: ${{ secrets.BUF_TOKEN }}
docs-push:
runs-on: ubuntu-latest
name: Publish Docs to onsonr.dev
steps:
- uses: actions/checkout@v4
- name: Configure Git Credentials
run: |
git config user.name github-actions[bot]
git config user.email 41898282+github-actions[bot]@users.noreply.github.com
- uses: actions/setup-python@v5
with:
python-version: 3.x
- run: echo "cache_id=$(date --utc '+%V')" >> $GITHUB_ENV
- uses: actions/cache@v4
with:
key: mkdocs-material-${{ env.cache_id }}
path: .cache
restore-keys: |
mkdocs-material-
- run: pip install mkdocs-material
- run: cd docs && mkdocs gh-deploy --force

File diff suppressed because it is too large


@@ -871,27 +871,27 @@ func (x *fastReflection_MsgUpdateParamsResponse) ProtoMethods() *protoiface.Meth
}
var (
md_MsgSpawn protoreflect.MessageDescriptor
fd_MsgSpawn_authority protoreflect.FieldDescriptor
fd_MsgSpawn_params protoreflect.FieldDescriptor
md_MsgInitialize protoreflect.MessageDescriptor
fd_MsgInitialize_authority protoreflect.FieldDescriptor
fd_MsgInitialize_params protoreflect.FieldDescriptor
)
func init() {
file_dwn_v1_tx_proto_init()
md_MsgSpawn = File_dwn_v1_tx_proto.Messages().ByName("MsgSpawn")
fd_MsgSpawn_authority = md_MsgSpawn.Fields().ByName("authority")
fd_MsgSpawn_params = md_MsgSpawn.Fields().ByName("params")
md_MsgInitialize = File_dwn_v1_tx_proto.Messages().ByName("MsgInitialize")
fd_MsgInitialize_authority = md_MsgInitialize.Fields().ByName("authority")
fd_MsgInitialize_params = md_MsgInitialize.Fields().ByName("params")
}
var _ protoreflect.Message = (*fastReflection_MsgSpawn)(nil)
var _ protoreflect.Message = (*fastReflection_MsgInitialize)(nil)
type fastReflection_MsgSpawn MsgSpawn
type fastReflection_MsgInitialize MsgInitialize
func (x *MsgSpawn) ProtoReflect() protoreflect.Message {
return (*fastReflection_MsgSpawn)(x)
func (x *MsgInitialize) ProtoReflect() protoreflect.Message {
return (*fastReflection_MsgInitialize)(x)
}
func (x *MsgSpawn) slowProtoReflect() protoreflect.Message {
func (x *MsgInitialize) slowProtoReflect() protoreflect.Message {
mi := &file_dwn_v1_tx_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -903,43 +903,43 @@ func (x *MsgSpawn) slowProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
var _fastReflection_MsgSpawn_messageType fastReflection_MsgSpawn_messageType
var _ protoreflect.MessageType = fastReflection_MsgSpawn_messageType{}
var _fastReflection_MsgInitialize_messageType fastReflection_MsgInitialize_messageType
var _ protoreflect.MessageType = fastReflection_MsgInitialize_messageType{}
type fastReflection_MsgSpawn_messageType struct{}
type fastReflection_MsgInitialize_messageType struct{}
func (x fastReflection_MsgSpawn_messageType) Zero() protoreflect.Message {
return (*fastReflection_MsgSpawn)(nil)
func (x fastReflection_MsgInitialize_messageType) Zero() protoreflect.Message {
return (*fastReflection_MsgInitialize)(nil)
}
func (x fastReflection_MsgSpawn_messageType) New() protoreflect.Message {
return new(fastReflection_MsgSpawn)
func (x fastReflection_MsgInitialize_messageType) New() protoreflect.Message {
return new(fastReflection_MsgInitialize)
}
func (x fastReflection_MsgSpawn_messageType) Descriptor() protoreflect.MessageDescriptor {
return md_MsgSpawn
func (x fastReflection_MsgInitialize_messageType) Descriptor() protoreflect.MessageDescriptor {
return md_MsgInitialize
}
// Descriptor returns message descriptor, which contains only the protobuf
// type information for the message.
func (x *fastReflection_MsgSpawn) Descriptor() protoreflect.MessageDescriptor {
return md_MsgSpawn
func (x *fastReflection_MsgInitialize) Descriptor() protoreflect.MessageDescriptor {
return md_MsgInitialize
}
// Type returns the message type, which encapsulates both Go and protobuf
// type information. If the Go type information is not needed,
// it is recommended that the message descriptor be used instead.
func (x *fastReflection_MsgSpawn) Type() protoreflect.MessageType {
return _fastReflection_MsgSpawn_messageType
func (x *fastReflection_MsgInitialize) Type() protoreflect.MessageType {
return _fastReflection_MsgInitialize_messageType
}
// New returns a newly allocated and mutable empty message.
func (x *fastReflection_MsgSpawn) New() protoreflect.Message {
return new(fastReflection_MsgSpawn)
func (x *fastReflection_MsgInitialize) New() protoreflect.Message {
return new(fastReflection_MsgInitialize)
}
// Interface unwraps the message reflection interface and
// returns the underlying ProtoMessage interface.
func (x *fastReflection_MsgSpawn) Interface() protoreflect.ProtoMessage {
return (*MsgSpawn)(x)
func (x *fastReflection_MsgInitialize) Interface() protoreflect.ProtoMessage {
return (*MsgInitialize)(x)
}
// Range iterates over every populated field in an undefined order,
@@ -947,16 +947,16 @@ func (x *fastReflection_MsgSpawn) Interface() protoreflect.ProtoMessage
// Range returns immediately if f returns false.
// While iterating, mutating operations may only be performed
// on the current field descriptor.
func (x *fastReflection_MsgSpawn) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
func (x *fastReflection_MsgInitialize) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
if x.Authority != "" {
value := protoreflect.ValueOfString(x.Authority)
if !f(fd_MsgSpawn_authority, value) {
if !f(fd_MsgInitialize_authority, value) {
return
}
}
if x.Params != nil {
value := protoreflect.ValueOfMessage(x.Params.ProtoReflect())
if !f(fd_MsgSpawn_params, value) {
if !f(fd_MsgInitialize_params, value) {
return
}
}
@@ -973,17 +973,17 @@ func (x *fastReflection_MsgSpawn) Range(f func(protoreflect.FieldDescriptor, pro
// In other cases (aside from the nullable cases above),
// a proto3 scalar field is populated if it contains a non-zero value, and
// a repeated field is populated if it is non-empty.
func (x *fastReflection_MsgSpawn) Has(fd protoreflect.FieldDescriptor) bool {
func (x *fastReflection_MsgInitialize) Has(fd protoreflect.FieldDescriptor) bool {
switch fd.FullName() {
case "dwn.v1.MsgSpawn.authority":
case "dwn.v1.MsgInitialize.authority":
return x.Authority != ""
case "dwn.v1.MsgSpawn.params":
case "dwn.v1.MsgInitialize.params":
return x.Params != nil
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: dwn.v1.MsgSpawn"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: dwn.v1.MsgInitialize"))
}
panic(fmt.Errorf("message dwn.v1.MsgSpawn does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message dwn.v1.MsgInitialize does not contain field %s", fd.FullName()))
}
}
@@ -993,17 +993,17 @@ func (x *fastReflection_MsgSpawn) Has(fd protoreflect.FieldDescriptor) bool {
// associated with the given field number.
//
// Clear is a mutating operation and unsafe for concurrent use.
func (x *fastReflection_MsgSpawn) Clear(fd protoreflect.FieldDescriptor) {
func (x *fastReflection_MsgInitialize) Clear(fd protoreflect.FieldDescriptor) {
switch fd.FullName() {
case "dwn.v1.MsgSpawn.authority":
case "dwn.v1.MsgInitialize.authority":
x.Authority = ""
case "dwn.v1.MsgSpawn.params":
case "dwn.v1.MsgInitialize.params":
x.Params = nil
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: dwn.v1.MsgSpawn"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: dwn.v1.MsgInitialize"))
}
panic(fmt.Errorf("message dwn.v1.MsgSpawn does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message dwn.v1.MsgInitialize does not contain field %s", fd.FullName()))
}
}
@@ -1013,19 +1013,19 @@ func (x *fastReflection_MsgSpawn) Clear(fd protoreflect.FieldDescriptor) {
// the default value of a bytes scalar is guaranteed to be a copy.
// For unpopulated composite types, it returns an empty, read-only view
// of the value; to obtain a mutable reference, use Mutable.
func (x *fastReflection_MsgSpawn) Get(descriptor protoreflect.FieldDescriptor) protoreflect.Value {
func (x *fastReflection_MsgInitialize) Get(descriptor protoreflect.FieldDescriptor) protoreflect.Value {
switch descriptor.FullName() {
case "dwn.v1.MsgSpawn.authority":
case "dwn.v1.MsgInitialize.authority":
value := x.Authority
return protoreflect.ValueOfString(value)
case "dwn.v1.MsgSpawn.params":
case "dwn.v1.MsgInitialize.params":
value := x.Params
return protoreflect.ValueOfMessage(value.ProtoReflect())
default:
if descriptor.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: dwn.v1.MsgSpawn"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: dwn.v1.MsgInitialize"))
}
panic(fmt.Errorf("message dwn.v1.MsgSpawn does not contain field %s", descriptor.FullName()))
panic(fmt.Errorf("message dwn.v1.MsgInitialize does not contain field %s", descriptor.FullName()))
}
}
@@ -1039,17 +1039,17 @@ func (x *fastReflection_MsgSpawn) Get(descriptor protoreflect.FieldDescriptor) p
// empty, read-only value, then it panics.
//
// Set is a mutating operation and unsafe for concurrent use.
func (x *fastReflection_MsgSpawn) Set(fd protoreflect.FieldDescriptor, value protoreflect.Value) {
func (x *fastReflection_MsgInitialize) Set(fd protoreflect.FieldDescriptor, value protoreflect.Value) {
switch fd.FullName() {
case "dwn.v1.MsgSpawn.authority":
case "dwn.v1.MsgInitialize.authority":
x.Authority = value.Interface().(string)
case "dwn.v1.MsgSpawn.params":
case "dwn.v1.MsgInitialize.params":
x.Params = value.Message().Interface().(*Params)
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: dwn.v1.MsgSpawn"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: dwn.v1.MsgInitialize"))
}
panic(fmt.Errorf("message dwn.v1.MsgSpawn does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message dwn.v1.MsgInitialize does not contain field %s", fd.FullName()))
}
}
@@ -1063,48 +1063,48 @@ func (x *fastReflection_MsgSpawn) Set(fd protoreflect.FieldDescriptor, value pro
// It panics if the field does not contain a composite type.
//
// Mutable is a mutating operation and unsafe for concurrent use.
func (x *fastReflection_MsgSpawn) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value {
func (x *fastReflection_MsgInitialize) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value {
switch fd.FullName() {
case "dwn.v1.MsgSpawn.params":
case "dwn.v1.MsgInitialize.params":
if x.Params == nil {
x.Params = new(Params)
}
return protoreflect.ValueOfMessage(x.Params.ProtoReflect())
case "dwn.v1.MsgSpawn.authority":
panic(fmt.Errorf("field authority of message dwn.v1.MsgSpawn is not mutable"))
case "dwn.v1.MsgInitialize.authority":
panic(fmt.Errorf("field authority of message dwn.v1.MsgInitialize is not mutable"))
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: dwn.v1.MsgSpawn"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: dwn.v1.MsgInitialize"))
}
panic(fmt.Errorf("message dwn.v1.MsgSpawn does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message dwn.v1.MsgInitialize does not contain field %s", fd.FullName()))
}
}
// NewField returns a new value that is assignable to the field
// for the given descriptor. For scalars, this returns the default value.
// For lists, maps, and messages, this returns a new, empty, mutable value.
func (x *fastReflection_MsgSpawn) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value {
func (x *fastReflection_MsgInitialize) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value {
switch fd.FullName() {
case "dwn.v1.MsgSpawn.authority":
case "dwn.v1.MsgInitialize.authority":
return protoreflect.ValueOfString("")
case "dwn.v1.MsgSpawn.params":
case "dwn.v1.MsgInitialize.params":
m := new(Params)
return protoreflect.ValueOfMessage(m.ProtoReflect())
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: dwn.v1.MsgSpawn"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: dwn.v1.MsgInitialize"))
}
panic(fmt.Errorf("message dwn.v1.MsgSpawn does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message dwn.v1.MsgInitialize does not contain field %s", fd.FullName()))
}
}
// WhichOneof reports which field within the oneof is populated,
// returning nil if none are populated.
// It panics if the oneof descriptor does not belong to this message.
func (x *fastReflection_MsgSpawn) WhichOneof(d protoreflect.OneofDescriptor) protoreflect.FieldDescriptor {
func (x *fastReflection_MsgInitialize) WhichOneof(d protoreflect.OneofDescriptor) protoreflect.FieldDescriptor {
switch d.FullName() {
default:
panic(fmt.Errorf("%s is not a oneof field in dwn.v1.MsgSpawn", d.FullName()))
panic(fmt.Errorf("%s is not a oneof field in dwn.v1.MsgInitialize", d.FullName()))
}
panic("unreachable")
}
@@ -1112,7 +1112,7 @@ func (x *fastReflection_MsgSpawn) WhichOneof(d protoreflect.OneofDescriptor) pro
// GetUnknown retrieves the entire list of unknown fields.
// The caller may only mutate the contents of the RawFields
// if the mutated bytes are stored back into the message with SetUnknown.
func (x *fastReflection_MsgSpawn) GetUnknown() protoreflect.RawFields {
func (x *fastReflection_MsgInitialize) GetUnknown() protoreflect.RawFields {
return x.unknownFields
}
@@ -1123,7 +1123,7 @@ func (x *fastReflection_MsgSpawn) GetUnknown() protoreflect.RawFields {
// An empty RawFields may be passed to clear the fields.
//
// SetUnknown is a mutating operation and unsafe for concurrent use.
func (x *fastReflection_MsgSpawn) SetUnknown(fields protoreflect.RawFields) {
func (x *fastReflection_MsgInitialize) SetUnknown(fields protoreflect.RawFields) {
x.unknownFields = fields
}
@@ -1135,7 +1135,7 @@ func (x *fastReflection_MsgSpawn) SetUnknown(fields protoreflect.RawFields) {
// message type, but the details are implementation dependent.
// Validity is not part of the protobuf data model, and may not
// be preserved in marshaling or other operations.
func (x *fastReflection_MsgSpawn) IsValid() bool {
func (x *fastReflection_MsgInitialize) IsValid() bool {
return x != nil
}
@@ -1145,9 +1145,9 @@ func (x *fastReflection_MsgSpawn) IsValid() bool {
// The returned methods type is identical to
// "google.golang.org/protobuf/runtime/protoiface".Methods.
// Consult the protoiface package documentation for details.
func (x *fastReflection_MsgSpawn) ProtoMethods() *protoiface.Methods {
func (x *fastReflection_MsgInitialize) ProtoMethods() *protoiface.Methods {
size := func(input protoiface.SizeInput) protoiface.SizeOutput {
x := input.Message.Interface().(*MsgSpawn)
x := input.Message.Interface().(*MsgInitialize)
if x == nil {
return protoiface.SizeOutput{
NoUnkeyedLiterals: input.NoUnkeyedLiterals,
@@ -1177,7 +1177,7 @@ func (x *fastReflection_MsgSpawn) ProtoMethods() *protoiface.Methods {
}
marshal := func(input protoiface.MarshalInput) (protoiface.MarshalOutput, error) {
x := input.Message.Interface().(*MsgSpawn)
x := input.Message.Interface().(*MsgInitialize)
if x == nil {
return protoiface.MarshalOutput{
NoUnkeyedLiterals: input.NoUnkeyedLiterals,
@@ -1228,7 +1228,7 @@ func (x *fastReflection_MsgSpawn) ProtoMethods() *protoiface.Methods {
}, nil
}
unmarshal := func(input protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) {
x := input.Message.Interface().(*MsgSpawn)
x := input.Message.Interface().(*MsgInitialize)
if x == nil {
return protoiface.UnmarshalOutput{
NoUnkeyedLiterals: input.NoUnkeyedLiterals,
@@ -1260,10 +1260,10 @@ func (x *fastReflection_MsgSpawn) ProtoMethods() *protoiface.Methods {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: MsgSpawn: wiretype end group for non-group")
return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: MsgInitialize: wiretype end group for non-group")
}
if fieldNum <= 0 {
return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: MsgSpawn: illegal tag %d (wire type %d)", fieldNum, wire)
return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: MsgInitialize: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
@@ -1370,23 +1370,23 @@ func (x *fastReflection_MsgSpawn) ProtoMethods() *protoiface.Methods {
}
var (
md_MsgSpawnResponse protoreflect.MessageDescriptor
md_MsgInitializeResponse protoreflect.MessageDescriptor
)
func init() {
file_dwn_v1_tx_proto_init()
md_MsgSpawnResponse = File_dwn_v1_tx_proto.Messages().ByName("MsgSpawnResponse")
md_MsgInitializeResponse = File_dwn_v1_tx_proto.Messages().ByName("MsgInitializeResponse")
}
var _ protoreflect.Message = (*fastReflection_MsgSpawnResponse)(nil)
var _ protoreflect.Message = (*fastReflection_MsgInitializeResponse)(nil)
type fastReflection_MsgSpawnResponse MsgSpawnResponse
type fastReflection_MsgInitializeResponse MsgInitializeResponse
func (x *MsgSpawnResponse) ProtoReflect() protoreflect.Message {
return (*fastReflection_MsgSpawnResponse)(x)
func (x *MsgInitializeResponse) ProtoReflect() protoreflect.Message {
return (*fastReflection_MsgInitializeResponse)(x)
}
func (x *MsgSpawnResponse) slowProtoReflect() protoreflect.Message {
func (x *MsgInitializeResponse) slowProtoReflect() protoreflect.Message {
mi := &file_dwn_v1_tx_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -1398,43 +1398,43 @@ func (x *MsgSpawnResponse) slowProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
var _fastReflection_MsgSpawnResponse_messageType fastReflection_MsgSpawnResponse_messageType
var _ protoreflect.MessageType = fastReflection_MsgSpawnResponse_messageType{}
var _fastReflection_MsgInitializeResponse_messageType fastReflection_MsgInitializeResponse_messageType
var _ protoreflect.MessageType = fastReflection_MsgInitializeResponse_messageType{}
type fastReflection_MsgSpawnResponse_messageType struct{}
type fastReflection_MsgInitializeResponse_messageType struct{}
func (x fastReflection_MsgSpawnResponse_messageType) Zero() protoreflect.Message {
return (*fastReflection_MsgSpawnResponse)(nil)
func (x fastReflection_MsgInitializeResponse_messageType) Zero() protoreflect.Message {
return (*fastReflection_MsgInitializeResponse)(nil)
}
func (x fastReflection_MsgSpawnResponse_messageType) New() protoreflect.Message {
return new(fastReflection_MsgSpawnResponse)
func (x fastReflection_MsgInitializeResponse_messageType) New() protoreflect.Message {
return new(fastReflection_MsgInitializeResponse)
}
func (x fastReflection_MsgSpawnResponse_messageType) Descriptor() protoreflect.MessageDescriptor {
return md_MsgSpawnResponse
func (x fastReflection_MsgInitializeResponse_messageType) Descriptor() protoreflect.MessageDescriptor {
return md_MsgInitializeResponse
}
// Descriptor returns message descriptor, which contains only the protobuf
// type information for the message.
func (x *fastReflection_MsgSpawnResponse) Descriptor() protoreflect.MessageDescriptor {
return md_MsgSpawnResponse
func (x *fastReflection_MsgInitializeResponse) Descriptor() protoreflect.MessageDescriptor {
return md_MsgInitializeResponse
}
// Type returns the message type, which encapsulates both Go and protobuf
// type information. If the Go type information is not needed,
// it is recommended that the message descriptor be used instead.
func (x *fastReflection_MsgSpawnResponse) Type() protoreflect.MessageType {
return _fastReflection_MsgSpawnResponse_messageType
func (x *fastReflection_MsgInitializeResponse) Type() protoreflect.MessageType {
return _fastReflection_MsgInitializeResponse_messageType
}
// New returns a newly allocated and mutable empty message.
func (x *fastReflection_MsgSpawnResponse) New() protoreflect.Message {
return new(fastReflection_MsgSpawnResponse)
func (x *fastReflection_MsgInitializeResponse) New() protoreflect.Message {
return new(fastReflection_MsgInitializeResponse)
}
// Interface unwraps the message reflection interface and
// returns the underlying ProtoMessage interface.
func (x *fastReflection_MsgSpawnResponse) Interface() protoreflect.ProtoMessage {
return (*MsgSpawnResponse)(x)
func (x *fastReflection_MsgInitializeResponse) Interface() protoreflect.ProtoMessage {
return (*MsgInitializeResponse)(x)
}
// Range iterates over every populated field in an undefined order,
@@ -1442,7 +1442,7 @@ func (x *fastReflection_MsgSpawnResponse) Interface() protoreflect.ProtoMessage
// Range returns immediately if f returns false.
// While iterating, mutating operations may only be performed
// on the current field descriptor.
func (x *fastReflection_MsgSpawnResponse) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
func (x *fastReflection_MsgInitializeResponse) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
}
// Has reports whether a field is populated.
@@ -1456,13 +1456,13 @@ func (x *fastReflection_MsgSpawnResponse) Range(f func(protoreflect.FieldDescrip
// In other cases (aside from the nullable cases above),
// a proto3 scalar field is populated if it contains a non-zero value, and
// a repeated field is populated if it is non-empty.
func (x *fastReflection_MsgSpawnResponse) Has(fd protoreflect.FieldDescriptor) bool {
func (x *fastReflection_MsgInitializeResponse) Has(fd protoreflect.FieldDescriptor) bool {
switch fd.FullName() {
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: dwn.v1.MsgSpawnResponse"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: dwn.v1.MsgInitializeResponse"))
}
panic(fmt.Errorf("message dwn.v1.MsgSpawnResponse does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message dwn.v1.MsgInitializeResponse does not contain field %s", fd.FullName()))
}
}
@@ -1472,13 +1472,13 @@ func (x *fastReflection_MsgSpawnResponse) Clear(fd protoreflect.FieldDescriptor)
// associated with the given field number.
//
// Clear is a mutating operation and unsafe for concurrent use.
func (x *fastReflection_MsgSpawnResponse) Clear(fd protoreflect.FieldDescriptor) {
func (x *fastReflection_MsgInitializeResponse) Clear(fd protoreflect.FieldDescriptor) {
switch fd.FullName() {
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: dwn.v1.MsgSpawnResponse"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: dwn.v1.MsgInitializeResponse"))
}
panic(fmt.Errorf("message dwn.v1.MsgSpawnResponse does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message dwn.v1.MsgInitializeResponse does not contain field %s", fd.FullName()))
}
}
@@ -1488,13 +1488,13 @@ func (x *fastReflection_MsgSpawnResponse) Get(descriptor protoreflect.FieldDescr
// the default value of a bytes scalar is guaranteed to be a copy.
// For unpopulated composite types, it returns an empty, read-only view
// of the value; to obtain a mutable reference, use Mutable.
func (x *fastReflection_MsgSpawnResponse) Get(descriptor protoreflect.FieldDescriptor) protoreflect.Value {
func (x *fastReflection_MsgInitializeResponse) Get(descriptor protoreflect.FieldDescriptor) protoreflect.Value {
switch descriptor.FullName() {
default:
if descriptor.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: dwn.v1.MsgSpawnResponse"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: dwn.v1.MsgInitializeResponse"))
}
panic(fmt.Errorf("message dwn.v1.MsgSpawnResponse does not contain field %s", descriptor.FullName()))
panic(fmt.Errorf("message dwn.v1.MsgInitializeResponse does not contain field %s", descriptor.FullName()))
}
}
@@ -1508,13 +1508,13 @@ func (x *fastReflection_MsgSpawnResponse) Set(fd protoreflect.FieldDescriptor, v
// empty, read-only value, then it panics.
//
// Set is a mutating operation and unsafe for concurrent use.
func (x *fastReflection_MsgSpawnResponse) Set(fd protoreflect.FieldDescriptor, value protoreflect.Value) {
func (x *fastReflection_MsgInitializeResponse) Set(fd protoreflect.FieldDescriptor, value protoreflect.Value) {
switch fd.FullName() {
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: dwn.v1.MsgSpawnResponse"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: dwn.v1.MsgInitializeResponse"))
}
panic(fmt.Errorf("message dwn.v1.MsgSpawnResponse does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message dwn.v1.MsgInitializeResponse does not contain field %s", fd.FullName()))
}
}
@@ -1528,36 +1528,36 @@ func (x *fastReflection_MsgSpawnResponse) Set(fd protoreflect.FieldDescriptor, v
// It panics if the field does not contain a composite type.
//
// Mutable is a mutating operation and unsafe for concurrent use.
func (x *fastReflection_MsgSpawnResponse) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value {
func (x *fastReflection_MsgInitializeResponse) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value {
switch fd.FullName() {
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: dwn.v1.MsgSpawnResponse"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: dwn.v1.MsgInitializeResponse"))
}
panic(fmt.Errorf("message dwn.v1.MsgSpawnResponse does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message dwn.v1.MsgInitializeResponse does not contain field %s", fd.FullName()))
}
}
// NewField returns a new value that is assignable to the field
// for the given descriptor. For scalars, this returns the default value.
// For lists, maps, and messages, this returns a new, empty, mutable value.
func (x *fastReflection_MsgSpawnResponse) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value {
func (x *fastReflection_MsgInitializeResponse) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value {
switch fd.FullName() {
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: dwn.v1.MsgSpawnResponse"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: dwn.v1.MsgInitializeResponse"))
}
panic(fmt.Errorf("message dwn.v1.MsgSpawnResponse does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message dwn.v1.MsgInitializeResponse does not contain field %s", fd.FullName()))
}
}
// WhichOneof reports which field within the oneof is populated,
// returning nil if none are populated.
// It panics if the oneof descriptor does not belong to this message.
func (x *fastReflection_MsgSpawnResponse) WhichOneof(d protoreflect.OneofDescriptor) protoreflect.FieldDescriptor {
func (x *fastReflection_MsgInitializeResponse) WhichOneof(d protoreflect.OneofDescriptor) protoreflect.FieldDescriptor {
switch d.FullName() {
default:
panic(fmt.Errorf("%s is not a oneof field in dwn.v1.MsgSpawnResponse", d.FullName()))
panic(fmt.Errorf("%s is not a oneof field in dwn.v1.MsgInitializeResponse", d.FullName()))
}
panic("unreachable")
}
@@ -1565,7 +1565,7 @@ func (x *fastReflection_MsgSpawnResponse) WhichOneof(d protoreflect.OneofDescrip
// GetUnknown retrieves the entire list of unknown fields.
// The caller may only mutate the contents of the RawFields
// if the mutated bytes are stored back into the message with SetUnknown.
func (x *fastReflection_MsgSpawnResponse) GetUnknown() protoreflect.RawFields {
func (x *fastReflection_MsgInitializeResponse) GetUnknown() protoreflect.RawFields {
return x.unknownFields
}
@@ -1576,7 +1576,7 @@ func (x *fastReflection_MsgSpawnResponse) GetUnknown() protoreflect.RawFields {
// An empty RawFields may be passed to clear the fields.
//
// SetUnknown is a mutating operation and unsafe for concurrent use.
func (x *fastReflection_MsgSpawnResponse) SetUnknown(fields protoreflect.RawFields) {
func (x *fastReflection_MsgInitializeResponse) SetUnknown(fields protoreflect.RawFields) {
x.unknownFields = fields
}
@@ -1588,7 +1588,7 @@ func (x *fastReflection_MsgSpawnResponse) SetUnknown(fields protoreflect.RawFiel
// message type, but the details are implementation dependent.
// Validity is not part of the protobuf data model, and may not
// be preserved in marshaling or other operations.
func (x *fastReflection_MsgSpawnResponse) IsValid() bool {
func (x *fastReflection_MsgInitializeResponse) IsValid() bool {
return x != nil
}
@@ -1598,9 +1598,9 @@ func (x *fastReflection_MsgSpawnResponse) IsValid() bool {
// The returned methods type is identical to
// "google.golang.org/protobuf/runtime/protoiface".Methods.
// Consult the protoiface package documentation for details.
func (x *fastReflection_MsgSpawnResponse) ProtoMethods() *protoiface.Methods {
func (x *fastReflection_MsgInitializeResponse) ProtoMethods() *protoiface.Methods {
size := func(input protoiface.SizeInput) protoiface.SizeOutput {
x := input.Message.Interface().(*MsgSpawnResponse)
x := input.Message.Interface().(*MsgInitializeResponse)
if x == nil {
return protoiface.SizeOutput{
NoUnkeyedLiterals: input.NoUnkeyedLiterals,
@@ -1622,7 +1622,7 @@ func (x *fastReflection_MsgSpawnResponse) ProtoMethods() *protoiface.Methods {
}
marshal := func(input protoiface.MarshalInput) (protoiface.MarshalOutput, error) {
x := input.Message.Interface().(*MsgSpawnResponse)
x := input.Message.Interface().(*MsgInitializeResponse)
if x == nil {
return protoiface.MarshalOutput{
NoUnkeyedLiterals: input.NoUnkeyedLiterals,
@@ -1652,7 +1652,7 @@ func (x *fastReflection_MsgSpawnResponse) ProtoMethods() *protoiface.Methods {
}, nil
}
unmarshal := func(input protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) {
x := input.Message.Interface().(*MsgSpawnResponse)
x := input.Message.Interface().(*MsgInitializeResponse)
if x == nil {
return protoiface.UnmarshalOutput{
NoUnkeyedLiterals: input.NoUnkeyedLiterals,
@@ -1684,10 +1684,10 @@ func (x *fastReflection_MsgSpawnResponse) ProtoMethods() *protoiface.Methods {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: MsgSpawnResponse: wiretype end group for non-group")
return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: MsgInitializeResponse: wiretype end group for non-group")
}
if fieldNum <= 0 {
return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: MsgSpawnResponse: illegal tag %d (wire type %d)", fieldNum, wire)
return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: MsgInitializeResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
default:
@@ -1822,7 +1822,7 @@ func (*MsgUpdateParamsResponse) Descriptor() ([]byte, []int) {
// operation that must be performed interacting with the Vault.
//
// Since: cosmos-sdk 0.47
type MsgSpawn struct {
type MsgInitialize struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
@@ -1835,8 +1835,8 @@ type MsgSpawn struct {
Params *Params `protobuf:"bytes,2,opt,name=params,proto3" json:"params,omitempty"`
}
func (x *MsgSpawn) Reset() {
*x = MsgSpawn{}
func (x *MsgInitialize) Reset() {
*x = MsgInitialize{}
if protoimpl.UnsafeEnabled {
mi := &file_dwn_v1_tx_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -1844,25 +1844,25 @@ func (x *MsgSpawn) Reset() {
}
}
func (x *MsgSpawn) String() string {
func (x *MsgInitialize) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*MsgSpawn) ProtoMessage() {}
func (*MsgInitialize) ProtoMessage() {}
// Deprecated: Use MsgSpawn.ProtoReflect.Descriptor instead.
func (*MsgSpawn) Descriptor() ([]byte, []int) {
// Deprecated: Use MsgInitialize.ProtoReflect.Descriptor instead.
func (*MsgInitialize) Descriptor() ([]byte, []int) {
return file_dwn_v1_tx_proto_rawDescGZIP(), []int{2}
}
func (x *MsgSpawn) GetAuthority() string {
func (x *MsgInitialize) GetAuthority() string {
if x != nil {
return x.Authority
}
return ""
}
func (x *MsgSpawn) GetParams() *Params {
func (x *MsgInitialize) GetParams() *Params {
if x != nil {
return x.Params
}
@@ -1873,14 +1873,14 @@ func (x *MsgSpawn) GetParams() *Params {
// MsgSpawn message.
//
// Since: cosmos-sdk 0.47
type MsgSpawnResponse struct {
type MsgInitializeResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
func (x *MsgSpawnResponse) Reset() {
*x = MsgSpawnResponse{}
func (x *MsgInitializeResponse) Reset() {
*x = MsgInitializeResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_dwn_v1_tx_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -1888,14 +1888,14 @@ func (x *MsgSpawnResponse) Reset() {
}
}
func (x *MsgSpawnResponse) String() string {
func (x *MsgInitializeResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*MsgSpawnResponse) ProtoMessage() {}
func (*MsgInitializeResponse) ProtoMessage() {}
// Deprecated: Use MsgSpawnResponse.ProtoReflect.Descriptor instead.
func (*MsgSpawnResponse) Descriptor() ([]byte, []int) {
// Deprecated: Use MsgInitializeResponse.ProtoReflect.Descriptor instead.
func (*MsgInitializeResponse) Descriptor() ([]byte, []int) {
return file_dwn_v1_tx_proto_rawDescGZIP(), []int{3}
}
@@ -1919,34 +1919,35 @@ var file_dwn_v1_tx_proto_rawDesc = []byte{
0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x04, 0xc8, 0xde, 0x1f, 0x00, 0x52, 0x06, 0x70, 0x61, 0x72,
0x61, 0x6d, 0x73, 0x3a, 0x0e, 0x82, 0xe7, 0xb0, 0x2a, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72,
0x69, 0x74, 0x79, 0x22, 0x19, 0x0a, 0x17, 0x4d, 0x73, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65,
0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x80,
0x01, 0x0a, 0x08, 0x4d, 0x73, 0x67, 0x53, 0x70, 0x61, 0x77, 0x6e, 0x12, 0x36, 0x0a, 0x09, 0x61,
0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x18,
0xd2, 0xb4, 0x2d, 0x14, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65,
0x73, 0x73, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72,
0x69, 0x74, 0x79, 0x12, 0x2c, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x64, 0x77, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x61, 0x72,
0x61, 0x6d, 0x73, 0x42, 0x04, 0xc8, 0xde, 0x1f, 0x00, 0x52, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d,
0x73, 0x3a, 0x0e, 0x82, 0xe7, 0xb0, 0x2a, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74,
0x79, 0x22, 0x12, 0x0a, 0x10, 0x4d, 0x73, 0x67, 0x53, 0x70, 0x61, 0x77, 0x6e, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x8b, 0x01, 0x0a, 0x03, 0x4d, 0x73, 0x67, 0x12, 0x48, 0x0a,
0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x17, 0x2e,
0x64, 0x77, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x73, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65,
0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x1f, 0x2e, 0x64, 0x77, 0x6e, 0x2e, 0x76, 0x31, 0x2e,
0x4d, 0x73, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52,
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x05, 0x53, 0x70, 0x61, 0x77, 0x6e,
0x12, 0x10, 0x2e, 0x64, 0x77, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x73, 0x67, 0x53, 0x70, 0x61,
0x77, 0x6e, 0x1a, 0x18, 0x2e, 0x64, 0x77, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x73, 0x67, 0x53,
0x70, 0x61, 0x77, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x1a, 0x05, 0x80, 0xe7,
0xb0, 0x2a, 0x01, 0x42, 0x77, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x2e, 0x64, 0x77, 0x6e, 0x2e, 0x76,
0x31, 0x42, 0x07, 0x54, 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x27, 0x67, 0x69,
0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6f, 0x6e, 0x73, 0x6f, 0x6e, 0x72, 0x2f,
0x73, 0x6f, 0x6e, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x64, 0x77, 0x6e, 0x2f, 0x76, 0x31, 0x3b,
0x64, 0x77, 0x6e, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x44, 0x58, 0x58, 0xaa, 0x02, 0x06, 0x44, 0x77,
0x6e, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x06, 0x44, 0x77, 0x6e, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x12,
0x44, 0x77, 0x6e, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
0x74, 0x61, 0xea, 0x02, 0x07, 0x44, 0x77, 0x6e, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x33,
0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x85,
0x01, 0x0a, 0x0d, 0x4d, 0x73, 0x67, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65,
0x12, 0x36, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20,
0x01, 0x28, 0x09, 0x42, 0x18, 0xd2, 0xb4, 0x2d, 0x14, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e,
0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x09, 0x61,
0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x2c, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x61,
0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x64, 0x77, 0x6e, 0x2e, 0x76,
0x31, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x04, 0xc8, 0xde, 0x1f, 0x00, 0x52, 0x06,
0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x3a, 0x0e, 0x82, 0xe7, 0xb0, 0x2a, 0x09, 0x61, 0x75, 0x74,
0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x22, 0x17, 0x0a, 0x15, 0x4d, 0x73, 0x67, 0x49, 0x6e, 0x69,
0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32,
0x9a, 0x01, 0x0a, 0x03, 0x4d, 0x73, 0x67, 0x12, 0x48, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74,
0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x17, 0x2e, 0x64, 0x77, 0x6e, 0x2e, 0x76, 0x31,
0x2e, 0x4d, 0x73, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73,
0x1a, 0x1f, 0x2e, 0x64, 0x77, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x73, 0x67, 0x55, 0x70, 0x64,
0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
0x65, 0x12, 0x42, 0x0a, 0x0a, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x12,
0x15, 0x2e, 0x64, 0x77, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x73, 0x67, 0x49, 0x6e, 0x69, 0x74,
0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x1a, 0x1d, 0x2e, 0x64, 0x77, 0x6e, 0x2e, 0x76, 0x31, 0x2e,
0x4d, 0x73, 0x67, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x1a, 0x05, 0x80, 0xe7, 0xb0, 0x2a, 0x01, 0x42, 0x77, 0x0a, 0x0a,
0x63, 0x6f, 0x6d, 0x2e, 0x64, 0x77, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x07, 0x54, 0x78, 0x50, 0x72,
0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f,
0x6d, 0x2f, 0x6f, 0x6e, 0x73, 0x6f, 0x6e, 0x72, 0x2f, 0x73, 0x6f, 0x6e, 0x72, 0x2f, 0x61, 0x70,
0x69, 0x2f, 0x64, 0x77, 0x6e, 0x2f, 0x76, 0x31, 0x3b, 0x64, 0x77, 0x6e, 0x76, 0x31, 0xa2, 0x02,
0x03, 0x44, 0x58, 0x58, 0xaa, 0x02, 0x06, 0x44, 0x77, 0x6e, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x06,
0x44, 0x77, 0x6e, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x12, 0x44, 0x77, 0x6e, 0x5c, 0x56, 0x31, 0x5c,
0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x07, 0x44, 0x77,
0x6e, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -1965,17 +1966,17 @@ var file_dwn_v1_tx_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
var file_dwn_v1_tx_proto_goTypes = []interface{}{
(*MsgUpdateParams)(nil), // 0: dwn.v1.MsgUpdateParams
(*MsgUpdateParamsResponse)(nil), // 1: dwn.v1.MsgUpdateParamsResponse
(*MsgSpawn)(nil), // 2: dwn.v1.MsgSpawn
(*MsgSpawnResponse)(nil), // 3: dwn.v1.MsgSpawnResponse
(*MsgInitialize)(nil), // 2: dwn.v1.MsgInitialize
(*MsgInitializeResponse)(nil), // 3: dwn.v1.MsgInitializeResponse
(*Params)(nil), // 4: dwn.v1.Params
}
var file_dwn_v1_tx_proto_depIdxs = []int32{
4, // 0: dwn.v1.MsgUpdateParams.params:type_name -> dwn.v1.Params
4, // 1: dwn.v1.MsgSpawn.params:type_name -> dwn.v1.Params
4, // 1: dwn.v1.MsgInitialize.params:type_name -> dwn.v1.Params
0, // 2: dwn.v1.Msg.UpdateParams:input_type -> dwn.v1.MsgUpdateParams
2, // 3: dwn.v1.Msg.Spawn:input_type -> dwn.v1.MsgSpawn
2, // 3: dwn.v1.Msg.Initialize:input_type -> dwn.v1.MsgInitialize
1, // 4: dwn.v1.Msg.UpdateParams:output_type -> dwn.v1.MsgUpdateParamsResponse
3, // 5: dwn.v1.Msg.Spawn:output_type -> dwn.v1.MsgSpawnResponse
3, // 5: dwn.v1.Msg.Initialize:output_type -> dwn.v1.MsgInitializeResponse
4, // [4:6] is the sub-list for method output_type
2, // [2:4] is the sub-list for method input_type
2, // [2:2] is the sub-list for extension type_name
@@ -2015,7 +2016,7 @@ func file_dwn_v1_tx_proto_init() {
}
}
file_dwn_v1_tx_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*MsgSpawn); i {
switch v := v.(*MsgInitialize); i {
case 0:
return &v.state
case 1:
@@ -2027,7 +2028,7 @@ func file_dwn_v1_tx_proto_init() {
}
}
file_dwn_v1_tx_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*MsgSpawnResponse); i {
switch v := v.(*MsgInitializeResponse); i {
case 0:
return &v.state
case 1:

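Everything above is mechanical regeneration: the fast-reflection plumbing for MsgInitialize behaves exactly as it did for MsgSpawn. As a small illustration of what that plumbing enables (a sketch with placeholder values, using the go_package recorded in the descriptor above):

```go
package main

import (
	"fmt"

	dwnv1 "github.com/onsonr/sonr/api/dwn/v1"
	"google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	msg := &dwnv1.MsgInitialize{
		Authority: "sonr1...", // placeholder
		Params:    &dwnv1.Params{},
	}

	// Range visits only populated fields, via the fd_MsgInitialize_*
	// descriptors registered in the regenerated file.
	msg.ProtoReflect().Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
		fmt.Printf("%s = %v\n", fd.FullName(), v.Interface())
		return true // continue iterating
	})
}
```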

@@ -20,7 +20,7 @@ const _ = grpc.SupportPackageIsVersion9
const (
Msg_UpdateParams_FullMethodName = "/dwn.v1.Msg/UpdateParams"
Msg_Spawn_FullMethodName = "/dwn.v1.Msg/Spawn"
Msg_Initialize_FullMethodName = "/dwn.v1.Msg/Initialize"
)
// MsgClient is the client API for Msg service.
@@ -34,7 +34,7 @@ type MsgClient interface {
// Since: cosmos-sdk 0.47
UpdateParams(ctx context.Context, in *MsgUpdateParams, opts ...grpc.CallOption) (*MsgUpdateParamsResponse, error)
// Spawn spawns a new Vault
Spawn(ctx context.Context, in *MsgSpawn, opts ...grpc.CallOption) (*MsgSpawnResponse, error)
Initialize(ctx context.Context, in *MsgInitialize, opts ...grpc.CallOption) (*MsgInitializeResponse, error)
}
type msgClient struct {
@@ -55,10 +55,10 @@ func (c *msgClient) UpdateParams(ctx context.Context, in *MsgUpdateParams, opts
return out, nil
}
func (c *msgClient) Spawn(ctx context.Context, in *MsgSpawn, opts ...grpc.CallOption) (*MsgSpawnResponse, error) {
func (c *msgClient) Initialize(ctx context.Context, in *MsgInitialize, opts ...grpc.CallOption) (*MsgInitializeResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(MsgSpawnResponse)
err := c.cc.Invoke(ctx, Msg_Spawn_FullMethodName, in, out, cOpts...)
out := new(MsgInitializeResponse)
err := c.cc.Invoke(ctx, Msg_Initialize_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -76,7 +76,7 @@ type MsgServer interface {
// Since: cosmos-sdk 0.47
UpdateParams(context.Context, *MsgUpdateParams) (*MsgUpdateParamsResponse, error)
// Spawn spawns a new Vault
Spawn(context.Context, *MsgSpawn) (*MsgSpawnResponse, error)
Initialize(context.Context, *MsgInitialize) (*MsgInitializeResponse, error)
mustEmbedUnimplementedMsgServer()
}
@@ -90,8 +90,8 @@ type UnimplementedMsgServer struct{}
func (UnimplementedMsgServer) UpdateParams(context.Context, *MsgUpdateParams) (*MsgUpdateParamsResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method UpdateParams not implemented")
}
func (UnimplementedMsgServer) Spawn(context.Context, *MsgSpawn) (*MsgSpawnResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Spawn not implemented")
func (UnimplementedMsgServer) Initialize(context.Context, *MsgInitialize) (*MsgInitializeResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Initialize not implemented")
}
func (UnimplementedMsgServer) mustEmbedUnimplementedMsgServer() {}
func (UnimplementedMsgServer) testEmbeddedByValue() {}
@@ -132,20 +132,20 @@ func _Msg_UpdateParams_Handler(srv interface{}, ctx context.Context, dec func(in
return interceptor(ctx, in, info, handler)
}
func _Msg_Spawn_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(MsgSpawn)
func _Msg_Initialize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(MsgInitialize)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(MsgServer).Spawn(ctx, in)
return srv.(MsgServer).Initialize(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Msg_Spawn_FullMethodName,
FullMethod: Msg_Initialize_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(MsgServer).Spawn(ctx, req.(*MsgSpawn))
return srv.(MsgServer).Initialize(ctx, req.(*MsgInitialize))
}
return interceptor(ctx, in, info, handler)
}
@@ -162,8 +162,8 @@ var Msg_ServiceDesc = grpc.ServiceDesc{
Handler: _Msg_UpdateParams_Handler,
},
{
MethodName: "Spawn",
Handler: _Msg_Spawn_Handler,
MethodName: "Initialize",
Handler: _Msg_Initialize_Handler,
},
},
Streams: []grpc.StreamDesc{},

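Server implementations change in the same mechanical way: embed the regenerated UnimplementedMsgServer and rename the handler. A sketch, with keeper wiring elided — the module's own types package may differ from the api/dwn/v1 pulsar package used here for brevity:

```go
package keeper

import (
	"context"

	dwnv1 "github.com/onsonr/sonr/api/dwn/v1"
)

// msgServer embeds UnimplementedMsgServer so that adding future RPCs to
// the Msg service does not break this implementation
// (mustEmbedUnimplementedMsgServer enforces this).
type msgServer struct {
	dwnv1.UnimplementedMsgServer
}

// Initialize replaces the old Spawn handler; the body is a placeholder.
func (s msgServer) Initialize(ctx context.Context, msg *dwnv1.MsgInitialize) (*dwnv1.MsgInitializeResponse, error) {
	_ = msg.GetAuthority() // a real handler would validate the authority
	_ = msg.GetParams()    // ...and persist the params
	return &dwnv1.MsgInitializeResponse{}, nil
}
```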
File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -20,6 +20,8 @@ const _ = grpc.SupportPackageIsVersion9
const (
Query_Params_FullMethodName = "/svc.v1.Query/Params"
Query_OriginExists_FullMethodName = "/svc.v1.Query/OriginExists"
Query_ResolveOrigin_FullMethodName = "/svc.v1.Query/ResolveOrigin"
)
// QueryClient is the client API for Query service.
@@ -30,6 +32,10 @@ const (
type QueryClient interface {
// Params queries all parameters of the module.
Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error)
// OriginExists queries if a given origin exists.
OriginExists(ctx context.Context, in *QueryOriginExistsRequest, opts ...grpc.CallOption) (*QueryOriginExistsResponse, error)
// ResolveOrigin queries the domain of a given service and returns its record with capabilities.
ResolveOrigin(ctx context.Context, in *QueryResolveOriginRequest, opts ...grpc.CallOption) (*QueryResolveOriginResponse, error)
}
type queryClient struct {
@@ -50,6 +56,26 @@ func (c *queryClient) Params(ctx context.Context, in *QueryParamsRequest, opts .
return out, nil
}
func (c *queryClient) OriginExists(ctx context.Context, in *QueryOriginExistsRequest, opts ...grpc.CallOption) (*QueryOriginExistsResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(QueryOriginExistsResponse)
err := c.cc.Invoke(ctx, Query_OriginExists_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *queryClient) ResolveOrigin(ctx context.Context, in *QueryResolveOriginRequest, opts ...grpc.CallOption) (*QueryResolveOriginResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(QueryResolveOriginResponse)
err := c.cc.Invoke(ctx, Query_ResolveOrigin_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
// QueryServer is the server API for Query service.
// All implementations must embed UnimplementedQueryServer
// for forward compatibility.
@@ -58,6 +84,10 @@ func (c *queryClient) Params(ctx context.Context, in *QueryParamsRequest, opts .
type QueryServer interface {
// Params queries all parameters of the module.
Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error)
// OriginExists queries if a given origin exists.
OriginExists(context.Context, *QueryOriginExistsRequest) (*QueryOriginExistsResponse, error)
// ResolveOrigin queries the domain of a given service and returns its record with capabilities.
ResolveOrigin(context.Context, *QueryResolveOriginRequest) (*QueryResolveOriginResponse, error)
mustEmbedUnimplementedQueryServer()
}
@@ -71,6 +101,12 @@ type UnimplementedQueryServer struct{}
func (UnimplementedQueryServer) Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Params not implemented")
}
func (UnimplementedQueryServer) OriginExists(context.Context, *QueryOriginExistsRequest) (*QueryOriginExistsResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method OriginExists not implemented")
}
func (UnimplementedQueryServer) ResolveOrigin(context.Context, *QueryResolveOriginRequest) (*QueryResolveOriginResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ResolveOrigin not implemented")
}
func (UnimplementedQueryServer) mustEmbedUnimplementedQueryServer() {}
func (UnimplementedQueryServer) testEmbeddedByValue() {}
@@ -110,6 +146,42 @@ func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interf
return interceptor(ctx, in, info, handler)
}
func _Query_OriginExists_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(QueryOriginExistsRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(QueryServer).OriginExists(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Query_OriginExists_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(QueryServer).OriginExists(ctx, req.(*QueryOriginExistsRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Query_ResolveOrigin_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(QueryResolveOriginRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(QueryServer).ResolveOrigin(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Query_ResolveOrigin_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(QueryServer).ResolveOrigin(ctx, req.(*QueryResolveOriginRequest))
}
return interceptor(ctx, in, info, handler)
}
// Query_ServiceDesc is the grpc.ServiceDesc for Query service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
@@ -121,6 +193,14 @@ var Query_ServiceDesc = grpc.ServiceDesc{
MethodName: "Params",
Handler: _Query_Params_Handler,
},
{
MethodName: "OriginExists",
Handler: _Query_OriginExists_Handler,
},
{
MethodName: "ResolveOrigin",
Handler: _Query_ResolveOrigin_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "svc/v1/query.proto",

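The two new svc.v1 query endpoints are plain unary RPCs. A sketch of calling them — the import path is an assumption by analogy with api/dwn/v1, and the request structs are left empty because their field names are not shown in this diff; consult svc/v1/query.proto for the exact schema:

```go
package main

import (
	"context"
	"log"

	svcv1 "github.com/onsonr/sonr/api/svc/v1" // assumed path, mirroring api/dwn/v1
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.NewClient("localhost:9090",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	qc := svcv1.NewQueryClient(conn)
	ctx := context.Background()

	// Request fields omitted on purpose; see svc/v1/query.proto.
	exists, err := qc.OriginExists(ctx, &svcv1.QueryOriginExistsRequest{})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("origin exists: %v", exists)

	record, err := qc.ResolveOrigin(ctx, &svcv1.QueryResolveOriginRequest{})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("resolved record: %v", record)
}
```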

@@ -14,10 +14,10 @@ import (
type GatewayContext struct {
echo.Context
*hwayorm.Queries
agent useragent.UserAgent
hwayorm.Querier
id string
ipfsClient common.IPFS
agent useragent.UserAgent
tokenStore common.IPFSTokenStore
stagedEnclaves map[string]mpc.Enclave
grpcAddr string
@@ -38,12 +38,12 @@ func UseGateway(env hway.Hway, ipc common.IPFS, db *hwayorm.Queries) echo.Middle
ua := useragent.NewParser()
ctx := &GatewayContext{
Context: c,
turnstileSiteKey: env.GetTurnstileSiteKey(),
agent: ua.Parse(c.Request().UserAgent()),
Queries: db,
Querier: db,
ipfsClient: ipc,
agent: ua.Parse(c.Request().UserAgent()),
grpcAddr: env.GetSonrGrpcUrl(),
tokenStore: common.NewUCANStore(ipc),
turnstileSiteKey: env.GetTurnstileSiteKey(),
}
return next(ctx)
}

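Swapping the embedded *hwayorm.Queries for the hwayorm.Querier interface is what makes the session fetch refactor testable: any Querier implementation can now back a GatewayContext. A sketch written as if inside the same package — the override below is hypothetical, since Querier's method set is not shown in this diff:

```go
// stubQuerier embeds the interface so a test only overrides the calls a
// handler actually makes; unoverridden methods panic if reached.
type stubQuerier struct {
	hwayorm.Querier
}

// Hypothetical override for illustration; the real method names live in
// the generated hwayorm package.
// func (s stubQuerier) GetProfile(ctx context.Context, handle string) (*hwayorm.Profile, error) {
// 	return &hwayorm.Profile{}, nil // canned row
// }

func newTestGateway(c echo.Context) *GatewayContext {
	return &GatewayContext{
		Context: c,
		Querier: stubQuerier{}, // previously this field required a concrete *hwayorm.Queries
	}
}
```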

@@ -23,7 +23,7 @@ func UpdateProfile(c echo.Context) (*hwayorm.Profile, error) {
if err != nil {
return nil, err
}
return &profile, nil
return profile, nil
}
func ReadProfile(c echo.Context) (*hwayorm.Profile, error) {
@@ -36,7 +36,7 @@ func ReadProfile(c echo.Context) (*hwayorm.Profile, error) {
if err != nil {
return nil, err
}
return &profile, nil
return profile, nil
}
func DeleteProfile(c echo.Context) error {


@@ -32,7 +32,7 @@ func (c *CredentialDescriptor) ToModel(handle, origin string) *hwayorm.Credentia
}
}
func CredentialArrayToDescriptors(credentials []hwayorm.Credential) []*CredentialDescriptor {
func CredentialArrayToDescriptors(credentials []*hwayorm.Credential) []*CredentialDescriptor {
var descriptors []*CredentialDescriptor
for _, cred := range credentials {
cd := &CredentialDescriptor{


@@ -1 +0,0 @@
onsonr.dev


@@ -1,631 +0,0 @@
## v0.5.27 (2024-12-16)
## v0.5.26 (2024-12-13)
### Fix
- Correct regular expression for version tags in release workflow
## v0.5.25 (2024-12-11)
### Feat
- enable GoReleaser releases on tags and snapshots
- automate release on tag and workflow dispatch
## v0.5.24 (2024-12-11)
### Feat
- prevent duplicate releases
## v0.5.23 (2024-12-11)
### Refactor
- rename scheduled release workflow to versioned release
- remove changelog from release artifacts
## v0.5.22 (2024-12-11)
### Feat
- Implement passkey-based authentication and registration flow
## v0.5.21 (2024-12-11)
### Feat
- allow manual triggering of deployment workflow
- add start-tui command for interactive mode
- add coin selection and update passkey input in registration form
- add hway command for Sonr DID gateway
- Conditionally install process-compose only if binary not found
- Add process-compose support with custom start and down commands
- implement passkey registration flow
- Improve createProfile form layout with wider max-width and enhanced spacing
- improve index page UI with new navigation buttons and remove redundant settings buttons
- Make input rows responsive with grid layout for mobile and desktop
- enhance index page with additional settings buttons and style adjustments
- implement passkey-based authentication
- add support for Cloudsmith releases
- add go dependency and enhance devbox environment variables
- update create profile form placeholders and handle
- add DID-based authentication middleware
- Add validation for human verification slider sum in CreateProfile form
- implement passkey registration flow
- Update WebAuthn credential handling with modern browser standards
- Streamline passkey registration with automatic form submission
- Add credential parsing and logging in register finish handler
- Add credential details row with icon after passkey creation
- Add form validation for passkey credential input
- implement passkey registration flow
- Add hidden input to store passkey credential data for form submission
- add CI workflow for deploying network
- add hway binary support and Homebrew formula
- remove username from passkey creation
- implement passkey registration flow
- add passkey creation functionality
- add CNAME for onsonr.dev domain
### Fix
- use Unix domain sockets for devnet processes
- correct workflow name and improve devnet deployment process
- correct title of profile creation page
- rename devbox start script to up and remove stop script
- Consolidate archive configuration and add LICENSE file
- Improve cross-browser passkey credential handling and encoding
- Remove commented-out code in passkey registration script
- remove line-clamp from tailwind config
- remove unnecessary background and restart settings from process-compose.yaml
- suppress process-compose server output and log to file
### Refactor
- remove unnecessary git fetch step in deploy workflow
- remove obsolete interchain test dependencies
- update index views to use new nebula components
- move Wasm related code to pkg/common/wasm
- migrate config package to pkg directory
- migrate to new configuration system and model definitions
- move session package to pkg directory
- Refactor registration forms to use UI components
- move gateway config to vault package
- improve command line flag descriptions and variable names
- refactor hway command to use echo framework for server
- Update root command to load EnvImpl from cobra flags
- Modify command flags and environment loading logic in cmds.go
- improve build process and move process-compose.yaml
- remove unused devbox.json and related configurations
- Improve mobile layout responsiveness for Rows and Columns components
- Remove max-w-fit from Rows component
- replace session package with context package
- rename database initialization function
- move session management to dedicated database module
- remove unused UI components related to wallet and index pages
- consolidate handlers into single files
- move gateway and vault packages to internal directory
- Move registration form components to dedicated directory
- remove unused devbox package
- remove devbox configuration
- move vault package to app directory
- improve code structure within gateway package
- move gateway package to app directory
- move vault package internal components to root
- migrate layout imports to common styles package
- Move form templates and styles to common directory
- consolidate authentication and DID handling logic
- Improve WebAuthn credential handling and validation in register finish route
- remove profile card component
- Simplify passkey registration UI and move profile component inline
- Update credential logging with transport and ID type
- Update register handler to use protocol.CredentialDescriptor struct
- Update credential handling to use protocol.CredentialDescriptor
- improve profile card styling and functionality
- Simplify session management and browser information extraction
- Update PeerInfo to extract and store comprehensive device information
- improve address display in property details
- remove unused documentation generation script
- replace sonr/pkg/styles/layout with nebula/ui/layout
- migrate UI components to nebula module
- improve scopes.json structure and update scripts for better usability
## v0.5.20 (2024-12-07)
### Refactor
- simplify CI workflow by removing redundant asset publishing steps
## v0.5.19 (2024-12-06)
### Feat
- add support for parent field and resources list in Capability message
- add fast reflection methods for Capability and Resource
- add gum package and update devbox configuration
- add new button components and layout improvements
### Fix
- adjust fullscreen modal close button margin
- update devbox lockfile
- resolve rendering issue in login modal
### Refactor
- rename accaddr package to address
- Update Credential table to match WebAuthn Credential Descriptor
- Deployment setup
- migrate build system from Taskfile to Makefile
- rename Assertion to Account and update related code
- remove unused TUI components
- Move IPFS interaction functions to common package
- remove dependency on DWN.pkl
- remove unused dependencies and simplify module imports
- Rename x/vault -> x/dwn and x/service -> x/svc
- move resolver formatter to services package
- remove web documentation
- update devbox configuration and scripts
- rename layout component to root
- refactor authentication pages into their own modules
- update templ version to v0.2.778 and remove unused air config
- move signer implementation to mpc package
## v0.5.18 (2024-11-06)
## v0.5.17 (2024-11-05)
### Feat
- add remote client constructor
- add avatar image components
- add SVG CDN Illustrations to marketing architecture
- **marketing**: refactor marketing page components
- Refactor intro video component to use a proper script template
- Move Alpine.js script initialization to separate component
- Add intro video modal component
- add homepage architecture section
- add Hero section component with stats and buttons
- **css**: add new utility classes for group hover
- implement authentication register finish endpoint
- add controller creation step to allocate
- Update service module README based on protobuf files
- Update x/macaroon/README.md with details from protobuf files
- update Vault README with details from proto files
### Fix
- update file paths in error messages
- update intro video modal script
- include assets generation in wasm build
### Refactor
- update marketing section architecture
- change verification table id
- **proto**: remove macaroon proto
- rename ValidateBasic to Validate
- rename session cookie key
- remove unused sync-initial endpoint
- remove formatter.go from service module
## v0.5.16 (2024-10-21)
## v0.5.15 (2024-10-21)
## v0.5.14 (2024-10-21)
### Refactor
- remove StakingKeeper dependency from GlobalFeeDecorator
## v0.5.13 (2024-10-21)
### Feat
- add custom secp256k1 pubkey
### Refactor
- update gRPC client to use new request types
- use RawPublicKey instead of PublicKey in macaroon issuer
- improve error handling in DID module
## v0.5.12 (2024-10-18)
### Feat
- add User-Agent and Platform to session
- introduce AuthState enum for authentication state
### Fix
- **version**: revert version bump to 0.5.11
- **version**: update version to 0.5.12
### Refactor
- remove dependency on proto change detection
- update asset publishing configuration
## v0.5.11 (2024-10-10)
### Feat
- nebula assets served from CDN
- use CDN for nebula frontend assets
- add static hero section content to homepage
- add wrangler scripts for development, build, and deployment
- remove build configuration
- move gateway web code to dedicated directory
- add PubKey fast reflection
- **macaroon**: add transaction allowlist/denylist caveats
- add PR labeler
- **devbox**: remove hway start command
- add GitHub Actions workflow for running tests
- add workflow for deploying Hway to Cloudflare Workers
- Publish configs to R2
- integrate nebula UI with worker-assets-gen
- extract reusable layout components
- Implement service worker for IPFS vault
- implement CDN support for assets
- add payment method support
- add support for public key management
- add ModalForm component
- add LoginStart and RegisterStart routes
- implement authentication views
- add json tags to config structs
- implement templ forms for consent privacy, credential assert, credential register, and profile details
- **vault**: introduce assembly of the initial vault
- add client logos to homepage
- add tailwind utility classes
- implement new profile card component
### Fix
- Correct source directory for asset publishing
- install dependencies before nebula build
- update Schema service to use new API endpoint
- fix broken logo image path
### Refactor
- remove unnecessary branch configuration from scheduled release workflow
- update dwn configuration generation import path
- use nebula/routes instead of nebula/global
- move index template to routes package
- remove cdn package and move assets to global styles
- move nebula assets to hway build directory
- remove docker build and deployment
- rename internal/session package to internal/ctx
- remove unused fields from
- rename PR_TEMPLATE to PULL_REQUEST_TEMPLATE
- remove devbox.json init hook
- rename sonrd dockerfile to Dockerfile
- remove unused dependency
- rename 'global/cdn' to 'assets'
- move CDN assets to separate folder
- move Pkl module definitions to dedicated package
- move CDN assets to js/ folder
- remove unused component templates
- move ui components to global
- move view handlers to router package
## v0.5.10 (2024-10-07)
### Feat
- **blocks**: remove button component
## v0.5.9 (2024-10-06)
### Feat
- add Motr support
- update UIUX PKL to utilize optional fields
### Fix
- Update source directory for asset publishing
## v0.5.8 (2024-10-04)
### Refactor
- Remove unused logs configuration
## v0.5.7 (2024-10-04)
### Feat
- **devbox**: use process-compose for testnet services
- remove motr.mjs dependency
- add markdown rendering to issue templates
- update issue templates for better clarity
- add issue templates for tracking and task issues
- add issue templates for bug report and tracking
- introduce docker-compose based setup
### Refactor
- update issue template headings
- rename bug-report issue template to bug
## v0.5.6 (2024-10-03)
### Feat
- add hway and sonr processes to dev environment
## v0.5.5 (2024-10-03)
### Feat
- add rudimentary DidController table
- update home section with new features
- introduce Home model and refactor views
- **nebula**: create Home model for home page
### Refactor
- reorganize pkl files for better separation of concerns
- rename msg_server_test.go to rpc_test.go
## v0.5.4 (2024-10-02)
## v0.5.3 (2024-10-02)
### Fix
- remove unnecessary telegram message template
## v0.5.2 (2024-10-02)
### Feat
- **service**: integrate group module (#1104)
### Refactor
- revert version bump to 0.5.1
## v0.5.1 (2024-10-02)
### Refactor
- move Motr API to state package
## v0.5.0 (2024-10-02)
### Feat
- allow multiple macaroons with the same id
## v0.4.5 (2024-10-02)
### Fix
- use correct secret for docker login
## v0.4.4 (2024-10-02)
## v0.4.3 (2024-10-02)
### Feat
- **release**: add docker images for sonrd and motr
- update homepage with new visual design
- add DID to vault genesis schema
- add video component
- add video component
- add hx-get attribute to primary button in hero section
### Fix
- **layout**: add missing favicon
- **hero**: Use hx-swap for primary button to prevent flicker
### Refactor
- use single GITHUB_TOKEN for release workflow
- update workflow variables
## v0.4.2 (2024-10-01)
### Refactor
- use single GITHUB_TOKEN for release workflow
## v0.4.1 (2024-10-01)
### Feat
- Implement session management
- allow manual release triggers
- add Input and RegistrationForm models
- add new utility classes
- add login and registration pages
- add tailwindcss utilities
- add support for ARM64 architecture
- add DWN resolver field
- add stats section to homepage
- implement hero section using Pkl
- add PKL schema for message formats
- add Homebrew tap for sonr
- update release workflow to use latest tag
### Fix
- **version**: update version number to 0.4.0
- update release workflow to use latest tag
- **versioning**: revert version to 0.9.0
- **cta**: Fix typo in CTA title
- change bento section title to reflect security focus
- adjust hero image dimensions
- **Input**: Change type from to
- update hero image height in config.pkl
### Refactor
- move home page sections to home package
- rename motrd to motr
- update hero image dimensions
- move nebula configuration to static file
- rename buf-publish.yml to publish-assets.yml
- remove unused field from
## v0.4.0 (2024-09-30)
### Feat
- **dwn**: add wasm build for dwn
- add macaroon and oracle genesis states
- add scheduled binary release workflow
- introduce process-compose for process management
- add counter animation to hero section
- add registration page
### Fix
- Enable scheduled release workflow
### Refactor
- remove old changelog entries
- remove unnecessary checkout in scheduled-release workflow
- rename build ID to sonr
- remove unnecessary release existence check
- move dwn wasm build to pkg directory
## v0.3.1 (2024-09-29)
### Refactor
- move nebula/pages to pkg/nebula/pages
## v0.3.0 (2024-09-29)
### Feat
- add buf.lock for proto definitions
### Fix
- remove unused linting rules
- update proto breaking check target to master branch
### Refactor
- remove unused lock files and configurations
## v0.2.0 (2024-09-29)
### Feat
- disable goreleaser workflow
- update workflows to include master branch
- remove global style declaration
- **oracle**: add oracle module
- optimize IPFS configuration for better performance
- add local IPFS bootstrap script and refactor devbox config
- add AllocateVault HTTP endpoint
- add WebAuthn credential management functionality
- remove unused coins interface
- remove global integrity proof from genesis state
- add vault module
- enable buf.build publishing on master and develop branches
- add Gitflow workflow for syncing branches
- add automated production release workflow
- **ui**: implement profile page
- add automated production release workflow
- **did**: remove unused proto files
- add enums.pulsar.go file for PermissionScope enum (#4)
- add initial DID implementation
- remove builder interface
- add basic UI for block explorer
- add Usage: pkl [OPTIONS] COMMAND [ARGS]...
- use SQLite embedded driver
- add DID method for each coin
- Expand KeyType enum and update KeyInfo message in genesis.proto
- Add whitelisted key types to genesis params
- Add DID grants protobuf definition
- Add fields to KeyInfo struct to distinguish CBOR and standard blockchain key types
- Add new message types for AssetInfo, ChainInfo, Endpoint, ExplorerInfo, FeeInfo, and KeyInfo
- run sonr-node container in testnet network and make network external
- Add docker-compose.yaml file to start a Sonr testnet node
- configure Sonr testnet environment
- Update Dockerfile to start and run a testnet
- add Equal methods for AssetInfo and ChainInfo types
- Add ProveWitness and SyncVault RPCs
- Add MsgRegisterService to handle service registration
- Add MsgRegisterService to handle service registration
- add enums.pulsar.go file for PermissionScope enum
### Fix
- ensure go version is up-to-date
- use GITHUB_TOKEN for version bump workflow
- update account table interface to use address, chain and network
- **ci**: update docker vm release workflow with new token
- use mnemonic phrases for test account keys
- reduce motr proxy shutdown timeout
- **nebula**: use bunx for tailwindcss build
- **proto**: update protobuf message index numbers
- **ante**: reduce POA rate floor and ceiling
- Update proc_list_width in mprocs.yaml
- Add service to database when registering
- pin added did documents to local ipfs node
- remove extra spaces in typeUrl
- **release**: remove unnecessary quotes in tag pattern
- remove unused imports and simplify KeyInfo message
- bind node ports to localhost
- Update docker-compose network name to dokploy-network
- Update network name to dokploy
- remove unused port mapping
- Update docker-compose.yaml to use correct volume path
- update docker-compose volume name
- Update docker-compose.yaml to use shell directly for sonrd command
- replace "sh" with "/bin/sh" in docker-compose.yaml command
- Update runner image dependencies for debian-11
- **deps**: update golang image to 1.21
- **chains**: update nomic chain build target
- Remove unused `Meta` message from `genesis.proto`
- Add ProveWitness and SyncVault RPCs
### Refactor
- adjust source directory for config files (#1102)
- Use actions/checkout@v4
- remove unused master branch from CI workflow
- rename github token secret
- remove unnecessary x-cloak styles
- optimize oracle genesis proto
- remove unused code related to whitelisted assets
- update buf publish source directory
- adjust devbox configuration to reflect nebula changes
- rename msg_server.go to rpc.go
- remove devbox integration
- move dwn package to app/config
- move configuration files to app directory
- extract root command creation to separate file
- move ipfs setup to function
- remove unnecessary proxy config
- rename script to
- move DWN proxy server logic to separate file
- use htmx instead of dwn for vault client
- remove unused environment variables
- simplify verification method structure
- use staking keeper in DID keeper
- remove unused dependencies
- remove unused image building workflow
- add field to
- Update KeyKind Enum to have proper naming conventions
- Update `DIDNamespace` to have proper naming convention
- expose ports directly in docker-compose
- remove unused port mappings
- streamline script execution
- use CMD instead of ENTRYPOINT in Dockerfile
- **deps**: Upgrade Debian base image to 11
- Simplify the types and properties to keep a consistent structure for the blockchain
- remove PERMISSION_SCOPE_IDENTIFIERS_ENS enum value

View File

@ -1,104 +0,0 @@
## `x/did` - Auth & AuthZ
> The DID module is responsible for managing the creation and management of DIDs.
> Controllers represent on-chain accounts backed by a MPC keypair. Controllers
> provide methods for Wallet Account Abstraction (WAA) and are responsible for
> managing the creation and management of DIDs for an individual user.
### Features
- DID Controllers leverage the Cosmos SDK's `x/accounts` std interface for WAA.
- DIDs are represented by an `x/did` controller and are required to state the
controller's public key, which maps to the controller's capabilities.
- General Sign/Verify methods are provided by the QueryServer for HTTP requests.
- The Execute method is used to broadcast transactions across the network. (TODO)
- Biscuits are used to authenticate and authorize requests between services. (TODO)
### References
- [State](https://github.com/onsonr/sonr/tree/develop/x/did#state)
- [State Transitions](https://github.com/onsonr/sonr/tree/develop/x/did#state-transitions)
- [Messages](https://github.com/onsonr/sonr/tree/develop/x/did#messages)
- [Queries](https://github.com/onsonr/sonr/tree/develop/x/did#query)
- [Params](https://github.com/onsonr/sonr/tree/develop/x/did#params)
- [Client](https://github.com/onsonr/sonr/tree/develop/x/did#client)
- [Future Improvements](https://github.com/onsonr/sonr/tree/develop/x/did#future-improvements)
- [Tests](https://github.com/onsonr/sonr/tree/develop/x/did#tests)
- [Appendix](https://github.com/onsonr/sonr/tree/develop/x/did#appendix)
---
## `x/macaroon`
> The macaroon module is responsible for issuing and verifying macaroons. Macaroons
> are used to authenticate and authorize requests between services.
> Macaroons are requested by NFT Records from [`x/service`](2--Modules-Overview.md#x-service) and granted by controllers from [`x/did`](2--Modules-Overview.md#x-did).
### Features
- On Controller creation, a macaroon is created with an admin scope and a default expiry of _315,569,520 blocks (or ~10 years)_.
- On Service registration, a macaroon is created with a service scope and a default expiry of _31,556,952 blocks (or ~1 year)_.
- Macaroons contain the scope of access for a service and the expiry of the permissions in `blockHeight` (the block counts above assume roughly one-second blocks: 31,556,952 seconds is one year).
### References
- [State](https://github.com/onsonr/sonr/tree/develop/x/macaroon#state)
- [State Transitions](https://github.com/onsonr/sonr/tree/develop/x/macaroon#state-transitions)
- [Messages](https://github.com/onsonr/sonr/tree/develop/x/macaroon#messages)
- [Queries](https://github.com/onsonr/sonr/tree/develop/x/macaroon#query)
- [Params](https://github.com/onsonr/sonr/tree/develop/x/macaroon#params)
- [Client](https://github.com/onsonr/sonr/tree/develop/x/macaroon#client)
- [Future Improvements](https://github.com/onsonr/sonr/tree/develop/x/macaroon#future-improvements)
- [Tests](https://github.com/onsonr/sonr/tree/develop/x/macaroon#tests)
- [Appendix](https://github.com/onsonr/sonr/tree/develop/x/macaroon#appendix)
---
## `x/service`
> The service module is responsible for managing decentralized services. Services
> on the Sonr network are essentially on-chain MultiSig wallets that are
> represented by an NFT. Service admins are represented by
> a [`x/did`](2--Modules-Overview.md#x-did) controller and are required to state
> the service's scope of access, which maps to the service's capabilities.
### Features
- Needs a Valid Domain with .htaccess file to be whitelisted.
### References
- [State](https://github.com/onsonr/sonr/tree/develop/x/service#state)
- [State Transitions](https://github.com/onsonr/sonr/tree/develop/x/service#state-transitions)
- [Messages](https://github.com/onsonr/sonr/tree/develop/x/service#messages)
- [Queries](https://github.com/onsonr/sonr/tree/develop/x/service#query)
- [Params](https://github.com/onsonr/sonr/tree/develop/x/service#params)
- [Client](https://github.com/onsonr/sonr/tree/develop/x/service#client)
- [Future Improvements](https://github.com/onsonr/sonr/tree/develop/x/service#future-improvements)
- [Tests](https://github.com/onsonr/sonr/tree/develop/x/service#tests)
- [Appendix](https://github.com/onsonr/sonr/tree/develop/x/service#appendix)
---
## `x/vault`
> The vault module is responsible for managing the storage and access-control of
> Decentralized Web Nodes (DWNs) from IPFS. Vaults contain user-facing keys and
> are represented by a [`x/did`](2--Modules-Overview.md#x-did) controller.
### Features
- Vaults can be created by anyone, but efforts are made to restrict them to one per user.
- Vaults are stored in IPFS and when claimed, the bech32 Sonr Address is pinned to IPFS.
### References
- [State](https://github.com/onsonr/sonr/tree/develop/x/vault#state)
- [State Transitions](https://github.com/onsonr/sonr/tree/develop/x/vault#state-transitions)
- [Messages](https://github.com/onsonr/sonr/tree/develop/x/vault#messages)
- [Queries](https://github.com/onsonr/sonr/tree/develop/x/vault#query)
- [Params](https://github.com/onsonr/sonr/tree/develop/x/vault#params)
- [Client](https://github.com/onsonr/sonr/tree/develop/x/vault#client)
- [Future Improvements](https://github.com/onsonr/sonr/tree/develop/x/vault#future-improvements)
- [Tests](https://github.com/onsonr/sonr/tree/develop/x/vault#tests)
- [Appendix](https://github.com/onsonr/sonr/tree/develop/x/vault#appendix)

View File

@ -1,170 +0,0 @@
# Consumer Chain Launch Process
This guide is intended for consumer chain teams that are looking to be onboarded on to the Interchain Security testnet.
## Interchain Security Testnet Overview
- The Interchain Security (ICS) testnet is to be used to launch and test consumer chains. We recommend consumer chains to launch on the testnet before launching on the mainnet.
- All information about the ICS testnet is available in this [repository](https://github.com/cosmos/testnets/tree/master/interchain-security).
- The testnet coordinators (Hypha) have majority voting power in the ICS testnet. This means we need to work with you to bring your chain live and also to successfully pass any governance proposals you make.
## Chain Onboarding Process
For teams looking to join the ICS testnet, the onboarding process can be broken down into four phases:
- Testing and Integration
- Planning with Testnet Coordinators
- Proposal Submission
- Chain Launch
### Local Testing and Integration
During this phase, your team will run integration tests with the following elements of an Interchain Security testnet:
- Gaia provider chain
- Visit the provider chain [page](./provider/) for details on which Gaia version is currently being used.
- Relayers
- You will be responsible for running the relayer that relays the first set of Validator Set Change packets between provider and consumer chain. You should be proficient in setting up and running either [Hermes](https://github.com/informalsystems/hermes) or [rly](https://github.com/cosmos/relayer).
By the end of this phase, you are able to launch a consumer chain within a local testnet or CI workflow that resembles the testnet (or mainnet) environment.
### Planning with Testnet Coordinators
Once you have a binary release ready, you can begin planning the launch with the testnet coordinators (Hypha).
The goals of this phase are to update this repository with all the information validators need to join the network and to produce a `consumer-addition` proposal to be submitted in the provider chain.
We expect you to run the minimum infrastructure required to make your consumer chain usable by testnet participants. This means running:
1. **Seed/persistent nodes**
2. **Relayer**: it must be launched before the chain times out, preferably right after blocks start being produced.
- **IMPORTANT**: Make sure you have funds to pay gas fees for the relayer. You will likely need to set up an adequately funded genesis account for this purpose.
Additionally, you may want to run:
- a faucet such as this simple [REST faucet](https://github.com/hyphacoop/cosmos-rest-faucet) (it may need a separate funded account in the genesis file as well)
- a block explorer such as [ping.pub](https://github.com/ping-pub/explorer)
## ✍️ Submitting a PR for a new chain
Each consumer chain gets its own directory. You can use the [`slasher`](./stopped/slasher/) chain as reference. Feel free to clone the slasher directory, modify it for your consumer chain, and make a PR with the relevant information.
Hypha will be reviewing the PR to ensure it meets the following criteria:
#### README includes:
- [ ] Consumer chain repo and release or tag name.
- [ ] Build instructions for chain binary.
- [ ] Checksum of genesis file without CCV.
- [ ] Checksum of reference binary.
- [ ] Instructions on how to join
- [ ] Installation steps
- Endpoints
- [ ] Seeds OR persistent peers
- [ ] State sync nodes (if any)
See the `slasher` chain [page](./stopped/slasher) for reference.
#### `chain_id` must be identical in the following places:
- [ ] `README`
- [ ] genesis file
- [ ] consumer addition proposal
- [ ] bash script
We recommend choosing a `chain_id` with the suffix `-1`, even if it's a subsequent test of the same chain, e.g. `testchain-second-rehearsal-1`.
#### Binary checksum validation
- [ ] `shasum -a 256 <binary>` matches the checksum in the proposal
- [ ] `shasum -a 256 <binary>` matches `README`
#### Bash script
- [ ] version built in script must match `README`
- [ ] seeds or persistent peers must match `README`
#### Genesis file
- [ ] Genesis time must match spawn time in the `consumer-addition` proposal
- [ ] Accounts and balances: Properly funded accounts (e.g., gas fees for relayer, faucet, etc.)
- [ ] Bank balance denom matches denom in `README`
- [ ] Slashing parameters: Set `signed_blocks_window` and `min_signed_per_window` adequately to ensure validators have at least 12 hours to join the chain after launch without getting jailed
- [ ] `shasum -a 256 <genesis file without CCV>` matches the checksum in the proposal
- [ ] `shasum -a 256 <genesis file without CCV>` matches the checksum in the `README`
- [ ] The genesis file is correctly formed: `<consumer binary or gaiad> validate-genesis /path/to/genesis-without-ccv.json` returns without error
See the `slasher` chain [genesis](./stopped/slasher/slasher-genesis-without-ccv.json) for reference.
#### `consumer-addition` proposal
- [ ] Spawn time must match genesis time
- [ ] Spawn time must be later than voting period
- [ ] `revision_height: 1`
- [ ] `revision_number: 1` (only if the `chain_id` ends in `-1`)
- [ ] `transfer_timeout_period: 1800000000000`. This value should be smaller than `blocks_per_distribution_transmission * block_time`.
- [ ] `ccv_timeout_period: 2419200000000000`. This value must be larger than the unbonding period, the default is 28 days.
- [ ] `unbonding_period: 1728000000000000` (given current provider params)
See the `slasher` chain consumer-addition [proposal](./stopped/slasher/proposal-slasher.json) and [Interchain Security time-based parameters](https://github.com/cosmos/interchain-security/blob/main/docs/params.md#time-based-parameters) for reference.
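Putting the checklist together, an abridged `proposal.json` might look like the following sketch (illustrative values only; the exact field set and nesting, e.g. whether the revision fields sit under `initial_height`, depend on the provider version):
```json
{
  "title": "Launch testchain-1",
  "description": "Consumer addition proposal for testchain-1",
  "chain_id": "testchain-1",
  "spawn_time": "2024-01-15T12:00:00Z",
  "initial_height": { "revision_number": 1, "revision_height": 1 },
  "transfer_timeout_period": 1800000000000,
  "ccv_timeout_period": 2419200000000000,
  "unbonding_period": 1728000000000000
}
```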
#### Node configurations
- [ ] `minimum_gas_prices`
- [ ] Check with Hypha about any other chain-specific params
---
### On-chain Proposal Submission
When you make your proposal, please let us know well in advance. The current voting period is five minutes, which means we'll need to vote right after you submit your proposal. We recommend submitting the proposal together with us on a call.
The following will take place during the proposal submission phase:
- Your team will submit the `consumer-addition` proposal with a command that looks like this:
```
gaiad tx gov submit-legacy-proposal consumer-addition proposal.json --from <account name> --chain-id provider --gas auto --fees 500uatom -b block -y
```
- Testnet coordinators will vote on it shortly afterwards to make sure it passes.
- You will open a pull request to add the new consumer chain entry to this repo and update the [schedule page](SCHEDULE.md) with the launch date.
- You will announce the upcoming launch, including the spawn time, in the Interchain Security `announcements` channel of the Cosmos Network Discord Server. If you need permissions for posting, please reach out to us.
### Chain Launch
After the spawn time is reached, the Cross-Chain Validation (CCV) state will be available on the provider chain and the new IBC client will be created. At this point, you will be able to:
- Collect the Cross-Chain Validation (CCV) state from the provider chain.
```
gaiad q provider consumer-genesis <chain-id> -o json > ccv-state.json
```
- Update the genesis file with the CCV state.
```
jq -s '.[0].app_state.ccvconsumer = .[1] | .[0]' <consumer genesis without CCV state> ccv-state.json > <consumer genesis file with CCV state>
```
- Publish the genesis file with CCV state to the testnets repo.
- Post the link to the genesis file and the SHA256 hash to the Interchain Security `interchain-security-testnet` channel of the Cosmos Network Discord Server.
- Ensure the required peers are online for people to connect to.
The consumer chain will start producing blocks as soon as 66.67% of the provider chain's voting power comes online. You will be able to start the relayer afterwards:
- Query the IBC client ID of the provider chain.
```
gaiad q provider list-consumer-chains
```
- Create the required IBC connections and channels for the CCV channel to be established. Using Hermes:
```
hermes create connection --a-chain <consumer chain ID> --a-client 07-tendermint-0 --b-client <provider chain client ID>
hermes create channel --a-chain <consumer chain ID> --a-port consumer --b-port provider --order ordered --a-connection connection-0 --channel-version 1
```
- Start the relayer
- The trusting period fraction is set to `0.25` on the provider chain, so you should use a trusting period of 5 days in your relayer configuration (see the excerpt below).
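For example, in a Hermes `config.toml` the trusting period is set per chain; a minimal excerpt (other required chain fields omitted) looks like:
```toml
[[chains]]
id = 'provider'
trusting_period = '5days'
```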
Finally, the testnet coordinators will:
- Trigger a validator set update in the provider chain to establish the CCV channel and verify the validator set has been updated in the consumer chain.
- Announce the chain is interchain secured.
- Update the testnets repo with the IBC information.
## Talk to us
If you're a consumer chain looking to launch, please get in touch with Hypha. You can reach Lexa Michaelides at `lexa@hypha.coop` or on Telegram.

View File

@ -1,10 +0,0 @@
> With increasingly sensitive information being stored in centralized databases, we
> believe that a decentralized anonymity mechanism is the only way to protect user data.
> Sonr is at its core a peer-to-peer identity system, which means that users can choose
> to share their identity with others in a way that is private and secure.
# Decentralized Identifiers
# Cross-chain Interoperability
# W3C Web APIs

View File

@ -1,21 +0,0 @@
> Sonr is a decentralized platform that allows users to create and manage their own decentralized identity.
# Blockchain: Sonr
Sonr stores Decentralized Identifiers (DIDs) on its Cosmos-sdk based blockchain. The blockchain's role is to act as the
persistent pointer store for locations of user-owned data.
# User Key Vault: Motr
The Motr node is a service worker that functions as a personal encrypted key-enclave for users, stored on IPFS. These enclaves can be allocated and persisted on the
Sonr blockchain for Smart Wallet functionality.
# Network Gateway: Hway
The Hway protocol is a network proxy which routes network requests to the appropriate service endpoint. This is used for seamless communication between
Blockchain Nodes, Decentralized Applications, and User Nodes.
# Design System: Nebula
Built with Golang-Templ, TailwindCSS, HTMX, and Service Workers - Nebula is a component library which allows for
consistent UX across the entire ecosystem.

View File

@ -1,13 +0,0 @@
> The `$SNR` token is the native platform token of the Sonr network. It is used by services to
> pay for Authentication and Authorization services. The system is designed for developers to
> be similar to centralized authentication providers like Google, Facebook, Okta, etc.
# Usage
The Sonr blockchain is a Delegated Proof of Stake (DPoS) blockchain built with the Cosmos-sdk.
# Supply
> The total supply of `$SNR` is fixed at 1 billion.
![image](https://github.com/user-attachments/assets/8b9d6e6b-f3e5-464a-9032-6d8fe257a748)

View File

@ -1,11 +0,0 @@
> In order to maintain a tight-knit experience, we designed Sonr to operate completely
> in the point-of-view of the user. This led to us building a Component Library which
> creates consistent UX across the entire ecosystem.
# Overview
The Sonr blockchain is a Delegated Proof of Stake (DPoS) blockchain built with the Cosmos-sdk.
# Nebula Package
> The total supply of `$SNR` is fixed at 1 billion.

View File

@ -1,569 +0,0 @@
# Protocol Buffers in Cosmos SDK
## Overview
The Cosmos SDK uses Protocol Buffers for serialization and API definitions. Generation is handled via a Docker image: `ghcr.io/cosmos/proto-builder:0.15.x`.
## Generation Tools
- **Buf**: Primary tool for protobuf management
- **protocgen.sh**: Core generation script in `scripts/`
- **Makefile Commands**: Standard commands for generate, lint, and format (sketched below)
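As a rough sketch (target names vary between SDK versions; check the repository Makefile), the Makefile wraps the `proto-builder` image behind commands like:
```shell
# illustrative targets, assuming the usual Cosmos SDK Makefile layout
make proto-gen     # generate Go code from .proto files via the proto-builder image
make proto-lint    # run buf lint over the proto directory
make proto-format  # run buf format and rewrite files in place
```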
## Key Components
### Buf Configuration
1. **Workspace Setup**
- Root level buf workspace configuration
- Manages multiple protobuf directories
2. **Directory Structure**
```
proto/
├── buf.gen.gogo.yaml # GoGo Protobuf generation
├── buf.gen.pulsar.yaml # Pulsar API generation
├── buf.gen.swagger.yaml # OpenAPI/Swagger docs
├── buf.lock # Dependencies
├── buf.yaml # Core configuration
├── cosmos/ # Core protos
└── tendermint/ # Consensus protos
```
3. **Module Protos**
- Located in `x/{moduleName}/proto`
- Module-specific message definitions
#### `buf.gen.gogo.yaml`
`buf.gen.gogo.yaml` defines how the protobuf files should be generated for use within the module. This file uses [gogoproto](https://github.com/gogo/protobuf), a separate generator from the google go-proto generator that makes working with various objects more ergonomic and has more performant encode and decode steps.
```go reference
https://github.com/cosmos/cosmos-sdk/blob/main/proto/buf.gen.gogo.yaml#L1-L9
```
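For orientation, a gogo generation file generally has the following shape (a sketch, not the SDK's exact file; plugin names and options differ across versions):
```yaml
version: v1
plugins:
  # gogoproto-based generator for the module's Go types
  - name: gocosmos
    out: ..
    opt: plugins=grpc,Mgoogle/protobuf/any.proto=github.com/cosmos/cosmos-sdk/codec/types
  # gRPC gateway stubs for REST endpoints
  - name: grpc-gateway
    out: ..
    opt: logtostderr=true
```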
#### `buf.gen.pulsar.yaml`
`buf.gen.pulsar.yaml` defines how protobuf files should be generated using the [new golang apiv2 of protobuf](https://go.dev/blog/protobuf-apiv2). This generator is used instead of the google go-proto generator because it has some extra helpers for Cosmos SDK applications and will have more performant encode and decode than the google go-proto generator. You can follow the development of this generator [here](https://github.com/cosmos/cosmos-proto).
```go reference
https://github.com/cosmos/cosmos-sdk/blob/main/proto/buf.gen.pulsar.yaml#L1-L18
```
#### `buf.gen.swagger.yaml`
`buf.gen.swagger.yaml` generates the swagger documentation for the queries and messages of the chain. This will only define the REST API endpoints that were defined in the query and msg servers. You can find examples of this [here](https://github.com/cosmos/cosmos-sdk/blob/main/x/bank/proto/cosmos/bank/v1beta1/query.proto).
```go reference
https://github.com/cosmos/cosmos-sdk/blob/main/proto/buf.gen.swagger.yaml#L1-L6
```
#### `buf.lock`
This is an autogenerated file based on the dependencies required by the `.gen` files. There is no need to copy the current one. If you depend on cosmos-sdk proto definitions, a new entry for the Cosmos SDK will need to be provided. The dependency you will need to use is `buf.build/cosmos/cosmos-sdk`.
```go reference
https://github.com/cosmos/cosmos-sdk/blob/main/proto/buf.lock#L1-L16
```
#### `buf.yaml`
`buf.yaml` defines the [name of your package](https://github.com/cosmos/cosmos-sdk/blob/main/proto/buf.yaml#L3), which [breakage checker](https://buf.build/docs/tutorials/getting-started-with-buf-cli#detect-breaking-changes) to use and how to [lint your protobuf files](https://buf.build/docs/tutorials/getting-started-with-buf-cli#lint-your-api).
It is advised to use a tagged version of the buf modules corresponding to the version of the Cosmos SDK being used.
```go reference
https://github.com/cosmos/cosmos-sdk/blob/main/proto/buf.yaml#L1-L24
```
We use a variety of linters for the Cosmos SDK protobuf files. The repo also checks this in CI.
A reference to the GitHub Actions workflow can be found [here](https://github.com/cosmos/cosmos-sdk/blob/main/.github/workflows/proto.yml#L1-L32).
# ORM
The Cosmos SDK ORM is a state management library that provides a rich, but opinionated set of tools for managing a
module's state. It provides support for:
- type safe management of state
- multipart keys
- secondary indexes
- unique indexes
- easy prefix and range queries
- automatic genesis import/export
- automatic query services for clients, including support for light client proofs (still in development)
- indexing state data in external databases (still in development)
## Design and Philosophy
The ORM's data model is inspired by the relational data model found in SQL databases. The core abstraction is a table
with a primary key and optional secondary indexes.
Because the Cosmos SDK uses protobuf as its encoding layer, ORM tables are defined directly in .proto files using
protobuf options. Each table is defined by a single protobuf `message` type and a schema of multiple tables is
represented by a single .proto file.
Table structure is specified in the same file where messages are defined in order to make it easy to focus on better
design of the state layer. Because blockchain state layout is part of the public API for clients (TODO: link to docs on
light client proofs), it is important to think about the state layout as being part of the public API of a module.
Changing the state layout actually breaks clients, so it is ideal to think through it carefully up front and to aim for
a design that will eliminate or minimize breaking changes down the road. Also, good design of state enables building
more performant and sophisticated applications. Providing users with a set of tools inspired by relational databases
which have a long history of database design best practices and allowing schema to be specified declaratively in a
single place are design choices the ORM makes to enable better design and more durable APIs.
Also, by only supporting the table abstraction as opposed to key-value pair maps, it is easy to add new
columns/fields to any data structure without causing a breaking change, and the data structures can easily be indexed in
any off-the-shelf SQL database for more sophisticated queries.
The encoding of fields in keys is designed to support ordered iteration for all protobuf primitive field types
except for `bytes` as well as the well-known types `google.protobuf.Timestamp` and `google.protobuf.Duration`. Encodings
are optimized for storage space when it makes sense (see the documentation in `cosmos/orm/v1/orm.proto` for more details)
and table rows do not use extra storage space to store key fields in the value.
We recommend that users of the ORM attempt to follow database design best practices such as
[normalization](https://en.wikipedia.org/wiki/Database_normalization) (at least 1NF).
For instance, defining `repeated` fields in a table is considered an anti-pattern because it breaks first normal form (1NF).
Although we support `repeated` fields in tables, they cannot be used as key fields for this reason. This may seem
restrictive but years of best practice (and also experience in the SDK) have shown that following this pattern
leads to easier to maintain schemas.
To illustrate the motivation for these principles with an example from the SDK, historically balances were stored
as a mapping from account -> map of denom to amount. This did not scale well because an account with 100 token balances
needed to be encoded/decoded every time a single coin balance changed. Now balances are stored as account,denom -> amount
as in the example above. With the ORM's data model, if we wanted to add a new field to `Balance` such as
`unlocked_balance` (if vesting accounts were redesigned in this way), it would be easy to add it to this table without
requiring a data migration. Because of the ORM's optimizations, the account and denom are only stored in the key part
of storage and not in the value leading to both a flexible data model and efficient usage of storage.
## Defining Tables
To define a table:
1. create a .proto file to describe the module's state (naming it `state.proto` is recommended for consistency),
and import "cosmos/orm/v1/orm.proto", ex:
```protobuf
syntax = "proto3";
package bank_example;
import "cosmos/orm/v1/orm.proto";
```
2. define a `message` for the table, ex:
```protobuf
message Balance {
  bytes account = 1;
  string denom = 2;
  uint64 balance = 3;
}
```
3. add the `cosmos.orm.v1.table` option to the table and give the table an `id` unique within this .proto file:
```protobuf
message Balance {
  option (cosmos.orm.v1.table) = {
    id: 1
  };
  bytes account = 1;
  string denom = 2;
  uint64 balance = 3;
}
```
4. define the primary key field or fields, as a comma-separated list of the fields from the message which should make
up the primary key:
```protobuf
message Balance {
  option (cosmos.orm.v1.table) = {
    id: 1
    primary_key: { fields: "account,denom" }
  };
  bytes account = 1;
  string denom = 2;
  uint64 balance = 3;
}
```
5. add any desired secondary indexes by specifying an `id` unique within the table and a comma-separated list of the
index fields:
```protobuf
message Balance {
  option (cosmos.orm.v1.table) = {
    id: 1;
    primary_key: { fields: "account,denom" }
    index: { id: 1 fields: "denom" } // this allows querying for the accounts which own a denom
  };
  bytes account = 1;
  string denom = 2;
  uint64 amount = 3;
}
```
### Auto-incrementing Primary Keys
A common pattern in SDK modules and in database design is to define tables with a single integer `id` field with an
automatically generated primary key. In the ORM we can do this by setting the `auto_increment` option to `true` on the
primary key, ex:
```protobuf
message Account {
  option (cosmos.orm.v1.table) = {
    id: 2;
    primary_key: { fields: "id", auto_increment: true }
  };
  uint64 id = 1;
  bytes address = 2;
}
```
### Unique Indexes
A unique index can be added by setting the `unique` option to `true` on an index, ex:
```protobuf
message Account {
  option (cosmos.orm.v1.table) = {
    id: 2;
    primary_key: { fields: "id", auto_increment: true }
    index: {id: 1, fields: "address", unique: true}
  };
  uint64 id = 1;
  bytes address = 2;
}
```
### Singletons
The ORM also supports a special type of table with only one row called a `singleton`. This can be used for storing
module parameters. Singletons only need to define a unique `id` that cannot conflict with the id of other
tables or singletons in the same .proto file. Ex:
```protobuf
message Params {
  option (cosmos.orm.v1.singleton) = {
    id: 3;
  };
  google.protobuf.Duration voting_period = 1;
  uint64 min_threshold = 2;
}
```
## Running Codegen
NOTE: the ORM will only work with protobuf code that implements the [google.golang.org/protobuf](https://pkg.go.dev/google.golang.org/protobuf)
API. That means it will not work with code generated using gogo-proto.
To install the ORM's code generator, run:
```shell
go install cosmossdk.io/orm/cmd/protoc-gen-go-cosmos-orm@latest
```
The recommended way to run the code generator is to use [buf build](https://docs.buf.build/build/usage).
This is an example `buf.gen.yaml` that runs `protoc-gen-go`, `protoc-gen-go-grpc` and `protoc-gen-go-cosmos-orm`
using buf managed mode:
```yaml
version: v1
managed:
  enabled: true
  go_package_prefix:
    default: foo.bar/api # the go package prefix of your package
    override:
      buf.build/cosmos/cosmos-sdk: cosmossdk.io/api # required to import the Cosmos SDK api module
plugins:
  - name: go
    out: .
    opt: paths=source_relative
  - name: go-grpc
    out: .
    opt: paths=source_relative
  - name: go-cosmos-orm
    out: .
    opt: paths=source_relative
```
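With that file in place, code generation is a single command run from the directory containing `buf.yaml` and `buf.gen.yaml`:
```shell
buf generate
```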
## Using the ORM in a module
### Initialization
To use the ORM in a module, first create a `ModuleSchemaDescriptor`. This tells the ORM which .proto files have defined
an ORM schema and assigns them all a unique non-zero id. Ex:
```go
var MyModuleSchema = &ormv1alpha1.ModuleSchemaDescriptor{
	SchemaFile: []*ormv1alpha1.ModuleSchemaDescriptor_FileEntry{
		{
			Id:            1,
			ProtoFileName: mymodule.File_my_module_state_proto.Path(),
		},
	},
}
```
In the ORM generated code for a file named `state.proto`, there should be a generated interface `StateStore`
with a constructor `NewStateStore` that takes a parameter of type `ormdb.ModuleDB`. Add a reference to `StateStore`
to your module's keeper struct. Ex:
```go
type Keeper struct {
	db StateStore
}
```
Then instantiate the `StateStore` instance via an `ormdb.ModuleDB` that is instantiated from the `SchemaDescriptor`
above and one or more store services from `cosmossdk.io/core/store`. Ex:
```go
func NewKeeper(storeService store.KVStoreService) (*Keeper, error) {
	modDb, err := ormdb.NewModuleDB(MyModuleSchema, ormdb.ModuleDBOptions{KVStoreService: storeService})
	if err != nil {
		return nil, err
	}
	db, err := NewStateStore(modDb)
	if err != nil {
		return nil, err
	}
	return &Keeper{db: db}, nil
}
```
### Using the generated code
The generated code for the ORM contains methods for inserting, updating, deleting and querying table entries.
For each table in a .proto file, there is a type-safe table interface implemented in generated code. For instance,
for a table named `Balance` there should be a `BalanceTable` interface that looks like this:
```go
type BalanceTable interface {
	Insert(ctx context.Context, balance *Balance) error
	Update(ctx context.Context, balance *Balance) error
	Save(ctx context.Context, balance *Balance) error
	Delete(ctx context.Context, balance *Balance) error
	Has(ctx context.Context, account []byte, denom string) (found bool, err error)
	// Get returns nil and an error which responds true to ormerrors.IsNotFound() if the record was not found.
	Get(ctx context.Context, account []byte, denom string) (*Balance, error)
	List(ctx context.Context, prefixKey BalanceIndexKey, opts ...ormlist.Option) (BalanceIterator, error)
	ListRange(ctx context.Context, from, to BalanceIndexKey, opts ...ormlist.Option) (BalanceIterator, error)
	DeleteBy(ctx context.Context, prefixKey BalanceIndexKey) error
	DeleteRange(ctx context.Context, from, to BalanceIndexKey) error
	doNotImplement()
}
```
This `BalanceTable` should be accessible from the `StateStore` interface (assuming our file is named `state.proto`)
via a `BalanceTable()` accessor method. If all the above example tables/singletons were in the same `state.proto`,
then `StateStore` would get generated like this:
```go
type StateStore interface {
	BalanceTable() BalanceTable
	AccountTable() AccountTable
	ParamsTable() ParamsTable
	doNotImplement()
}
```
So to work with the `BalanceTable` in a keeper method we could use code like this:
```go
func (k keeper) AddBalance(ctx context.Context, acct []byte, denom string, amount uint64) error {
	balance, err := k.db.BalanceTable().Get(ctx, acct, denom)
	if err != nil && !ormerrors.IsNotFound(err) {
		return err
	}
	if balance == nil {
		balance = &Balance{
			Account: acct,
			Denom:   denom,
			Amount:  amount,
		}
	} else {
		balance.Amount = balance.Amount + amount
	}
	return k.db.BalanceTable().Save(ctx, balance)
}
```
`List` methods take `IndexKey` parameters. For instance, `BalanceTable.List` takes `BalanceIndexKey`. `BalanceIndexKey`
lets us represent index keys for the different indexes (primary and secondary) on the `Balance` table. The primary key
in the `Balance` table gets a struct `BalanceAccountDenomIndexKey` and the first index gets an index key `BalanceDenomIndexKey`.
If we wanted to list all the denoms and amounts that an account holds, we would use `BalanceAccountDenomIndexKey`
with a `List` query just on the account prefix. Ex:
```go
it, err := keeper.db.BalanceTable().List(ctx, BalanceAccountDenomIndexKey{}.WithAccount(acct))
```
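Similarly, assuming the generated names above, the secondary index on `denom` supports listing every account that holds a given denom:
```go
// hypothetical usage of the generated denom index key
it, err := keeper.db.BalanceTable().List(ctx, BalanceDenomIndexKey{}.WithDenom("uatom"))
```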
---
# ProtocolBuffer Annotations
This document explains the various protobuf scalars that have been added to make working with protobuf easier for Cosmos SDK application developers.
## Signer
Signer specifies which field should be used to determine the signer of a message for the Cosmos SDK. Clients can also use this field to infer which field determines the signer of a message.
Read more about the signer field [here](./02-messages-and-queries.md).
```protobuf reference
https://github.com/cosmos/cosmos-sdk/blob/e6848d99b55a65d014375b295bdd7f9641aac95e/proto/cosmos/bank/v1beta1/tx.proto#L40
```
```proto
option (cosmos.msg.v1.signer) = "from_address";
```
## Scalar
The scalar type defines a way for clients to understand how to construct protobuf messages according to what is expected by the module and the SDK.
```proto
(cosmos_proto.scalar) = "cosmos.AddressString"
```
Example of account address string scalar:
```proto reference
https://github.com/cosmos/cosmos-sdk/blob/e6848d99b55a65d014375b295bdd7f9641aac95e/proto/cosmos/bank/v1beta1/tx.proto#L46
```
Example of validator address string scalar:
```proto reference
https://github.com/cosmos/cosmos-sdk/blob/e8f28bf5db18b8d6b7e0d94b542ce4cf48fed9d6/proto/cosmos/distribution/v1beta1/query.proto#L87
```
Example of pubkey scalar:
```proto reference
https://github.com/cosmos/cosmos-sdk/blob/11068bfbcd44a7db8af63b6a8aa079b1718f6040/proto/cosmos/staking/v1beta1/tx.proto#L94
```
Example of Decimals scalar:
```proto reference
https://github.com/cosmos/cosmos-sdk/blob/e8f28bf5db18b8d6b7e0d94b542ce4cf48fed9d6/proto/cosmos/distribution/v1beta1/distribution.proto#L26
```
Example of Int scalar:
```proto reference
https://github.com/cosmos/cosmos-sdk/blob/e8f28bf5db18b8d6b7e0d94b542ce4cf48fed9d6/proto/cosmos/gov/v1/gov.proto#L137
```
There are a few options for what can be provided as a scalar: `cosmos.AddressString`, `cosmos.ValidatorAddressString`, `cosmos.ConsensusAddressString`, `cosmos.Int`, `cosmos.Dec`.
## Implements_Interface
Implements interface is used to provide information to client tooling like [telescope](https://github.com/cosmology-tech/telescope) on how to encode and decode protobuf messages.
```proto
option (cosmos_proto.implements_interface) = "cosmos.auth.v1beta1.AccountI";
```
## Method,Field,Message Added In
`method_added_in`, `field_added_in` and `message_added_in` are annotations that denote to clients that a method, field or message was added in a later version. This is useful when new methods or fields are added in later versions and the client needs to be aware of what it can call.
The annotation should be worded as follows:
```proto
option (cosmos_proto.method_added_in) = "cosmos-sdk v0.50.1";
option (cosmos_proto.method_added_in) = "x/epochs v1.0.0";
option (cosmos_proto.method_added_in) = "simapp v24.0.0";
```
## Amino
The amino codec was removed in `v0.50+`, which means there is no need to register a `legacyAminoCodec`. To replace the amino codec, Amino protobuf annotations are used to provide information to the amino codec on how to encode and decode protobuf messages.
:::note
Amino annotations are only used for backwards compatibility with amino. New modules are not required to use amino annotations.
:::
The below annotations are used to provide information to the amino codec on how to encode and decode protobuf messages in a backwards compatible manner.
### Name
Name specifies the amino name that would show up for the user in order for them to see which message they are signing.
```proto
option (amino.name) = "cosmos-sdk/BaseAccount";
```
```proto reference
https://github.com/cosmos/cosmos-sdk/blob/e8f28bf5db18b8d6b7e0d94b542ce4cf48fed9d6/proto/cosmos/bank/v1beta1/tx.proto#L41
```
### Field_Name
Field name specifies the amino name that would show up for the user in order for them to see which field they are signing.
```proto
uint64 height = 1 [(amino.field_name) = "public_key"];
```
```proto reference
https://github.com/cosmos/cosmos-sdk/blob/e8f28bf5db18b8d6b7e0d94b542ce4cf48fed9d6/proto/cosmos/distribution/v1beta1/distribution.proto#L166
```
### Dont_OmitEmpty
Dont omitempty specifies that the field should not be omitted when encoding to amino.
```proto
repeated cosmos.base.v1beta1.Coin amount = 3 [(amino.dont_omitempty) = true];
```
```proto reference
https://github.com/cosmos/cosmos-sdk/blob/e8f28bf5db18b8d6b7e0d94b542ce4cf48fed9d6/proto/cosmos/bank/v1beta1/bank.proto#L56
```
### Encoding
Encoding instructs the amino json marshaler how to encode certain fields that may differ from the standard encoding behaviour. The most common example of this is how `repeated cosmos.base.v1beta1.Coin` is encoded when using the amino json encoding format. The `legacy_coins` option tells the json marshaler [how to encode a null slice](https://github.com/cosmos/cosmos-sdk/blob/e8f28bf5db18b8d6b7e0d94b542ce4cf48fed9d6/x/tx/signing/aminojson/json_marshal.go#L65) of `cosmos.base.v1beta1.Coin`.
```proto
(amino.encoding) = "legacy_coins",
```
```proto reference
https://github.com/cosmos/cosmos-sdk/blob/e8f28bf5db18b8d6b7e0d94b542ce4cf48fed9d6/proto/cosmos/bank/v1beta1/genesis.proto#L23
```
Another example is a protobuf `bytes` that contains a valid JSON document.
The `inline_json` option tells the json marshaler to embed the JSON bytes into the wrapping document without escaping.
```proto
(amino.encoding) = "inline_json",
```
E.g. the bytes containing `{"foo":123}` in the `envelope` field would lead to the following JSON:
```json
{
"envelope": {
"foo": 123
}
}
```
If the bytes are not valid JSON, this produces broken JSON documents. Thus a JSON validity check needs to be in place at some point in the process.
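As a minimal sketch of such a validity gate in Go, the standard library's `json.Valid` can be used before storing the bytes; the `envelope` field name is carried over from the example above and the setter is hypothetical:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// setEnvelope guards a hypothetical bytes field destined for an
// (amino.encoding) = "inline_json" slot: only well-formed JSON may be
// stored, since the amino JSON marshaler embeds it without escaping.
func setEnvelope(envelope []byte) error {
	if !json.Valid(envelope) { // stdlib check: is this well-formed JSON?
		return fmt.Errorf("envelope is not valid JSON")
	}
	// safe to store
	return nil
}

func main() {
	fmt.Println(setEnvelope([]byte(`{"foo":123}`))) // <nil>
	fmt.Println(setEnvelope([]byte(`{"foo":`)))     // error
}
```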
# RFC 004: Account System Refactor
## Status
- Draft v2 (May 2023)
## Current Limitations
1. **Account Representation**: Limited by `google.protobuf.Any` encapsulation and basic authentication methods
2. **Interface Constraints**: Lacks support for advanced functionalities like vesting and complex auth systems
3. **Implementation Rigidity**: Poor differentiation between account types (e.g., `ModuleAccount`)
4. **Authorization System**: Basic `x/auth` module with limited scope beyond `x/bank` functionality
5. **Dependency Issues**: Cyclic dependencies between modules (e.g., between `x/auth` and `x/bank` for vesting)
## Proposal
This proposal aims to transform the way accounts are managed within the Cosmos SDK by introducing significant changes to
their structure and functionality.
### Rethinking Account Representation and Business Logic
Instead of representing accounts as simple `google.protobuf.Any` structures stored in state with no business logic
attached, this proposal suggests a more sophisticated account representation that is closer to module entities.
In fact, accounts should be able to receive messages and process them in the same way modules do, and be capable of storing
state in an isolated (prefixed) portion of state belonging only to them, in the same way as modules do.
### Account Message Reception
We propose that accounts should be able to receive messages in the same way modules can, allowing them to manage their
own state modifications without relying on other modules. This change would enable more advanced account functionality, such as the
`VestingAccount` example, where the x/bank module previously needed to change the vesting state by casting the abstracted
account to `VestingAccount` and triggering the `TrackDelegation` call. Accounts are already capable of sending messages when
a state transition, originating from a transaction, is executed.
When accounts receive messages, they will be able to identify the sender of the message and decide how to process the
state transition, if at all.
### Consequences
These changes would have significant implications for the Cosmos SDK, resulting in a system of actors that are equal from
the runtime perspective. The runtime would only be responsible for propagating messages between actors and would not
manage the authorization system. Instead, actors would manage their own authorizations. For instance, there would be no
need for the `x/auth` module to manage minting or burning of coins permissions, as it would fall within the scope of the
`x/bank` module.
The key difference between accounts and modules would lie in the origin of the message (state transition). Accounts
(ExternallyOwnedAccount), which have credentials (e.g., a public/private key pairing), originate state transitions from
transactions. In contrast, module state transitions do not have authentication credentials backing them and can be
caused by two factors: either as a consequence of a state transition coming from a transaction or triggered by a scheduler
(e.g., the runtime's Begin/EndBlock).
By implementing these proposed changes, the Cosmos SDK will benefit from a more extensible, versatile, and efficient account
management system that is better suited to address the requirements of the Cosmos ecosystem.
#### Standardization
With `x/accounts` allowing a modular API, there is a need to standardize accounts and the interfaces that wallets and other clients should expect to use. For this reason we will be using the [`CIP` repo](https://github.com/cosmos/cips) to standardize interfaces so that wallets know what to expect when interacting with accounts.
## Implementation
### Account Definition
We define the new `Account` type, which is what an account needs to implement to be treated as such.
An `Account` type is defined at APP level, so it cannot be dynamically loaded as the chain is running without upgrading the
node code, unless we create something like a `CosmWasmAccount` which is an account backed by an `x/wasm` contract.
```go
// Account is what the developer implements to define an account.
type Account[InitMsg proto.Message] interface {
	// Init is the function that initialises an account instance of a given kind.
	// InitMsg is used to initialise the initial state of an account.
	Init(ctx *Context, msg InitMsg) error

	// RegisterExecuteHandlers registers an account's execution messages.
	RegisterExecuteHandlers(executeRouter *ExecuteRouter)

	// RegisterQueryHandlers registers an account's query messages.
	RegisterQueryHandlers(queryRouter *QueryRouter)

	// RegisterMigrationHandlers registers an account's migration messages.
	RegisterMigrationHandlers(migrationRouter *MigrationRouter)
}
```
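To make the shape concrete, here is a self-contained sketch of a developer-defined account against this interface. The `Context`, router types, and `MsgInitCounter` are local stand-ins for the RFC's illustrative API (so the sketch compiles on its own), not shipped SDK types:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// Local stand-ins for the RFC's illustrative types; assumptions for the sketch only.
type Context struct{ store map[string][]byte } // the account's namespaced state
type ExecuteRouter struct{}
type QueryRouter struct{}
type MigrationRouter struct{}

// MsgInitCounter is a hypothetical init message (stand-in for a proto.Message).
type MsgInitCounter struct{ Start uint64 }

// CounterAccount is what a developer would implement: Init seeds the account's
// isolated state; the Register* hooks would wire up its messages.
type CounterAccount struct{}

func (CounterAccount) Init(ctx *Context, msg MsgInitCounter) error {
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, msg.Start)
	ctx.store["count"] = buf // written to the account's own prefixed KVStore in the real design
	return nil
}

func (CounterAccount) RegisterExecuteHandlers(*ExecuteRouter)     {}
func (CounterAccount) RegisterQueryHandlers(*QueryRouter)         {}
func (CounterAccount) RegisterMigrationHandlers(*MigrationRouter) {}

func main() {
	ctx := &Context{store: map[string][]byte{}}
	if err := (CounterAccount{}).Init(ctx, MsgInitCounter{Start: 7}); err != nil {
		panic(err)
	}
	fmt.Println(binary.BigEndian.Uint64(ctx.store["count"])) // 7
}
```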
### The InternalAccount definition
The public `Account` interface implementation is then converted by the runtime into an `InternalAccount` implementation,
which contains all the information and business logic needed to operate the account.
```go
type Schema struct {
	state   StateSchema    // represents the state of an account
	init    InitSchema     // represents the init msg schema
	exec    ExecSchema     // represents the multiple execution msg schemas, containing also responses
	query   QuerySchema    // represents the multiple query msg schemas, containing also responses
	migrate *MigrateSchema // represents the multiple migrate msg schemas, containing also responses, it's optional
}

type InternalAccount struct {
	init    func(ctx *Context, msg proto.Message) (*InitResponse, error)
	execute func(ctx *Context, msg proto.Message) (*ExecuteResponse, error)
	query   func(ctx *Context, msg proto.Message) (proto.Message, error)
	schema  func() *Schema
	migrate func(ctx *Context, msg proto.Message) (*MigrateResponse, error)
}
```
This is an internal view of the account as intended by the system. It is not meant to be what developers implement. An
example implementation of the `InternalAccount` type can be found in [this](https://github.com/testinginprod/accounts-poc/blob/main/examples/recover/recover.go)
example of an account whose credentials can be recovered. In fact, even if the `Internal` implementation is untyped (with
respect to `proto.Message`), the concrete implementation is fully typed.
During any of the execution methods of `InternalAccount`, `schema` excluded, the account is given a `Context` which provides:
- A namespaced `KVStore` for the account, which isolates the account state from others (NOTE: no `store keys` needed,
the account address serves as `store key`).
- Information regarding itself (its address)
- Information regarding the sender.
- ...
#### Init
Init defines the entrypoint that allows for a new account instance of a given kind to be initialised.
The account is passed some opaque protobuf message which is then interpreted and contains the instructions that
constitute the initial state of an account once it is deployed.
`Account` code can be deployed multiple times through the `Init` function, similar to how a `CosmWasm` contract code
can be deployed (instantiated) multiple times.
#### Execute
Execute defines the entrypoint that allows an `Account` to process a state transition; the account can then decide how to
process the state transition based on the message provided and the sender of the transition.
#### Query
Query defines a read-only entrypoint that provides a stable interface that links an account with its state. The reason for
which `Query` is still being preferred as an addition to raw state reflection is to:
- Provide a stable interface for querying (state can be optimised and change more frequently than a query)
- Provide a way to define an account `Interface` with respect to its `Read/Write` paths.
- Provide a way to query information that cannot be processed from raw state reflection, ex: compute information from lazy
state that has not been yet concretely processed (eg: balances with respect to lazy inputs/outputs)
#### Schema
Schema provides the definition of an account from an API perspective, and it's the only thing that should be taken into account
when interacting with an account from another account or module. For example, an account is an `authz-interface` account if
it has the following message in its execution messages: `MsgProxyStateTransition{ state_transition: google.protobuf.Any }`.
#### Migrate
Migrate defines the entrypoint that allows an `Account` to migrate its state from a previous version to a new one. Migrations
can be initiated only by the account itself: concretely, this means that the sender of the migrate action can only be the account
address itself. If the account wants to allow another address to migrate it on its behalf, it could create an execution message
that makes the account migrate itself.
### x/accounts module
In order to create accounts we define a new module, `x/accounts`. Note that `x/accounts` deploys accounts with no authentication
credentials attached, which means no action of such an account can be initiated from a TX; we will later explore how the
`x/authn` module uses `x/accounts` to deploy authenticated accounts.
This also has another important implication: account addresses are now fully decoupled from the authentication mechanism,
which in turn makes off-chain operations a little more complex, as the chain becomes the real link between account identifier
and credentials.
We could also introduce a way to deterministically compute the account address.
Note, from the transaction point of view, the `init_message` and `execute_message` are opaque `google.protobuf.Any`.
The module protobuf definitions for `x/accounts` are the following:
```protobuf
// Msg defines the Msg service.
service Msg {
  rpc Deploy(MsgDeploy) returns (MsgDeployResponse);
  rpc Execute(MsgExecute) returns (MsgExecuteResponse);
  rpc Migrate(MsgMigrate) returns (MsgMigrateResponse);
}

message MsgDeploy {
  string sender = 1;
  string kind = 2;
  google.protobuf.Any init_message = 3;
  repeated google.protobuf.Any authorize_messages = 4 [(gogoproto.nullable) = false];
}

message MsgDeployResponse {
  string address = 1;
  uint64 id = 2;
  google.protobuf.Any data = 3;
}

message MsgExecute {
  string sender = 1;
  string address = 2;
  google.protobuf.Any message = 3;
  repeated google.protobuf.Any authorize_messages = 4 [(gogoproto.nullable) = false];
}

message MsgExecuteResponse {
  google.protobuf.Any data = 1;
}

message MsgMigrate {
  string sender = 1;
  string new_account_kind = 2;
  google.protobuf.Any migrate_message = 3;
}

message MsgMigrateResponse {
  google.protobuf.Any data = 1;
}
```
#### MsgDeploy
Deploys a new instance of the given account `kind` with initial settings represented by the `init_message`, which is a `google.protobuf.Any`.
Of course the `init_message` can be empty. A response is returned containing the account ID and humanised address, alongside any response
that the account instantiation might produce.
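As a hedged illustration of the client side, packing the opaque init message into an `Any` could look like the following. `MsgDeploy` here mirrors the proto message above and is spelled out by hand only so the sketch stands alone (in practice it would be code-generated); `codectypes.NewAnyWithValue` is the existing SDK helper:

```go
package accountsdemo

import (
	codectypes "github.com/cosmos/cosmos-sdk/codec/types"
	"github.com/cosmos/gogoproto/proto"
)

// MsgDeploy mirrors the proto message above (hand-written stand-in for the
// generated type).
type MsgDeploy struct {
	Sender      string
	Kind        string
	InitMessage *codectypes.Any
}

// buildDeploy packs an arbitrary, account-specific init message into the
// opaque Any carried by MsgDeploy.
func buildDeploy(sender, kind string, initMsg proto.Message) (*MsgDeploy, error) {
	anyMsg, err := codectypes.NewAnyWithValue(initMsg)
	if err != nil {
		return nil, err
	}
	return &MsgDeploy{Sender: sender, Kind: kind, InitMessage: anyMsg}, nil
}
```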
#### Address derivation
In order to decouple public keys from account addresses, we introduce a new address derivation mechanism which is
#### MsgExecute
Sends a `StateTransition` execution request, where the state transition is represented by the `message`, which is a `google.protobuf.Any`.
The account can then decide whether to process it based on the `sender`.
#### MsgMigrate
Migrates an account to a new version of itself; the new version is represented by the `new_account_kind`. The state transition
can only be initiated by the account itself, which means that the `sender` must be the account address itself. During the migration
the account's current state is given to the new version of the account, which then executes the migration logic using the `migrate_message`;
it might change state or not, it's up to the account to decide. The response contains possible data that the account might produce
after the migration.
#### Authorize Messages
The `Deploy` and `Execute` messages have a field in common called `authorize_messages`: messages that the account
can execute on behalf of the sender. For example, in case an account is expecting some funds to be sent from the sender,
the sender can attach a `MsgSend` that the account can execute on the sender's behalf. These authorizations are short-lived;
they live only for the duration of the `Deploy` or `Execute` message execution, or until they are consumed.
An alternative would have been to add a `funds` field, as happens in CosmWasm, which guarantees the called contract that
the funds are available and sent in the context of the message execution. This would have been a simpler approach, but it would
have been limited to the context of `MsgSend` only, where the asset is `sdk.Coins`. The proposed generic way, instead, allows
the account to execute any message on behalf of the sender, which is more flexible; it could include NFT send execution, or
more complex things like `MsgMultiSend` or `MsgDelegate`, etc.
### Further discussion
#### Sub-accounts
We could provide a way to link accounts to other accounts. Maybe during deployment the sender could decide to link the
newly created account to its own, although there might be use-cases for which the deployer is different from the account
that needs to be linked; in this case a handshake protocol on linking would need to be defined.
#### Predictable address creation
We need to provide a way to create an account with a predictable address. This might serve a lot of purposes, like accounts
wanting to generate an address that:
- nobody else can claim besides the account used to generate the new account
- is predictable
For example:
```protobuf
message MsgDeployPredictable {
  string sender = 1;
  uint32 nonce = 2;
  ...
}
```
And then the address becomes `bechify(concat(sender, nonce))`
`x/accounts` would still use the monotonically increasing sequence as account number.
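A sketch of what `bechify(concat(sender, nonce))` could look like in Go, using the bech32 helpers the SDK ecosystem already depends on; hashing the concatenation down to a 20-byte payload and the `cosmos` prefix are assumptions for illustration, not a decided scheme:

```go
package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"

	"github.com/cosmos/btcutil/bech32"
)

// predictableAddress derives a deterministic address from (sender, nonce):
// nobody else can claim it, and the sender can compute it ahead of time.
func predictableAddress(sender []byte, nonce uint32) (string, error) {
	buf := make([]byte, 4)
	binary.BigEndian.PutUint32(buf, nonce)
	sum := sha256.Sum256(append(sender, buf...)) // fixed-size digest of sender||nonce
	data, err := bech32.ConvertBits(sum[:20], 8, 5, true)
	if err != nil {
		return "", err
	}
	return bech32.Encode("cosmos", data) // "bechify"
}

func main() {
	addr, err := predictableAddress([]byte("some-sender-address"), 1)
	if err != nil {
		panic(err)
	}
	fmt.Println(addr)
}
```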
#### Joining Multiple Accounts
As developers are building new kinds of accounts, it becomes necessary to provide a default way to combine the
functionalities of different account types. This allows developers to avoid duplicating code and enables end-users to
create or migrate to accounts with multiple functionalities without requiring custom development.
To address this need, we propose the inclusion of a default account type called "MultiAccount". The MultiAccount type is
designed to merge the functionalities of other accounts by combining their execution, query, and migration APIs.
The account joining process would only fail in the case of API (intended as non-state Schema APIs) conflicts, ensuring
compatibility and consistency.
With the introduction of the MultiAccount type, users would have the option to either migrate their existing accounts to
a MultiAccount type or extend an existing MultiAccount with newer APIs. This flexibility empowers users to leverage
various account functionalities without compromising compatibility or resorting to manual code duplication.
The MultiAccount type serves as a standardized solution for combining different account functionalities within the
cosmos-sdk ecosystem. By adopting this approach, developers can streamline the development process and users can benefit
from a modular and extensible account system.
# ADR 071: Cryptography v2 - Multi-curve support
## Change log
- May 7th 2024: Initial Draft (Zondax AG: @raynaudoe @juliantoledano @jleni @educlerici-zondax @lucaslopezf)
- June 13th 2024: Add CometBFT implementation proposal (Zondax AG: @raynaudoe @juliantoledano @jleni @educlerici-zondax @lucaslopezf)
- July 2nd 2024: Split ADR proposal, add link to ADR in cosmos/crypto (Zondax AG: @raynaudoe @juliantoledano @jleni @educlerici-zondax @lucaslopezf)
## Status
DRAFT
## Abstract
This ADR proposes the refactoring of the existing `Keyring` and `cosmos-sdk/crypto` code to implement [ADR-001-CryptoProviders](https://github.com/cosmos/crypto/blob/main/docs/architecture/adr-001-crypto-provider.md).
For in-depth details of the `CryptoProviders` and their design please refer to ADR mentioned above.
## Introduction
The introduction of multi-curve support in the cosmos-sdk cryptographic package offers significant advantages. By not being restricted to a single cryptographic curve, developers can choose the most appropriate curve based on security, performance, and compatibility requirements. This flexibility enhances the application's ability to adapt to evolving security standards and optimizes performance for specific use cases, helping to future-proof the SDK's cryptographic capabilities.
The enhancements in this proposal not only render the ["Keyring ADR"](https://github.com/cosmos/cosmos-sdk/issues/14940) obsolete, but also encompass its key aspects, replacing it with a more flexible and comprehensive approach. Furthermore, the gRPC service proposed in the mentioned ADR can be easily implemented as a specialized `CryptoProvider`.
### Glossary
1. **Interface**: In the context of this document, "interface" refers to Go's interface.
2. **Module**: In this document, "module" refers to a Go module.
3. **Package**: In the context of Go, a "package" refers to a unit of code organization.
## Context
In order to fully understand the need for changes and the proposed improvements, it's crucial to consider the current state of affairs:
- The Cosmos SDK currently lacks a comprehensive ADR for the cryptographic package.
- If a blockchain project requires a cryptographic curve that is not supported by the current SDK, the most likely scenario is that they will need to fork the SDK repository and make modifications. These modifications could potentially make the fork incompatible with future updates from the upstream SDK, complicating maintenance and integration.
- Type leakage of specific crypto data types exposes backward compatibility and extensibility challenges.
- The demand for a more flexible and extensible approach to cryptography and address management is high.
- Architectural changes are necessary to resolve many of the currently open issues related to new curves support.
- There is a current trend towards modularity in the Interchain stack (e.g., runtime modules).
- Security implications are a critical consideration during the redesign work.
## Objectives
The key objectives for this proposal are:
- Leverage `CryptoProviders`: utilize them as APIs for cryptographic tools, ensuring modularity, flexibility, and ease of integration.
- Developer-Centric Approach: prioritize clear, intuitive interfaces and best-practice design principles.
- Quality Assurance: enhance test coverage and improve testing methodologies to ensure the robustness and reliability of the module.
## Technical Goals
New Keyring:
- Design a new `Keyring` interface with a modular backend injection system to support hardware devices and cloud-based HSMs. This feature is optional and tied to complexity; if it proves too complex, it will be deferred to a future release as an enhancement.
## Proposed architecture
### Components
The main components to be used will be the same as those found in the [ADR-001](https://github.com/cosmos/crypto/blob/main/docs/architecture/adr-001-crypto-provider.md#components).
#### Storage and persistence
The storage and persistence layer is tasked with storing `CryptoProvider`s. Specifically, this layer must:
- Securely store the crypto provider's associated private key (only if stored locally, otherwise a reference to the private key will be stored instead).
- Store the [`ProviderMetadata`](https://github.com/cosmos/crypto/blob/main/docs/architecture/adr-001-crypto-provider.md#metadata) struct which contains the data that distinguishes that provider.
The purpose of this layer is to ensure that upon retrieval of the persisted data, we can access the provider's type, version, and specific configuration (which varies based on the provider type). This information will subsequently be utilized to initialize the appropriate factory, as detailed in the following section on the factory pattern.
The storage proposal involves using a modified version of the [Record](https://github.com/cosmos/cosmos-sdk/blob/main/proto/cosmos/crypto/keyring/v1/record.proto) struct, which is already defined in **Keyring/v1**. Additionally, we propose utilizing the existing keyring backends (keychain, filesystem, memory, etc.) to store these `Record`s in the same manner as the current **Keyring/v1**.
_Note: This approach will facilitate a smoother migration path from the current Keyring/v1 to the proposed architecture._
Below is the proposed protobuf message to be included in the modified `Record.proto` file
##### Protobuf message structure
The [record.proto](https://github.com/cosmos/cosmos-sdk/blob/main/proto/cosmos/crypto/keyring/v1/record.proto) file will be modified to include the `CryptoProvider` message as an optional field as follows.
```protobuf
// record.proto
message Record {
  string name = 1;
  google.protobuf.Any pub_key = 2;

  oneof item {
    Local local = 3;
    Ledger ledger = 4;
    Multi multi = 5;
    Offline offline = 6;
    CryptoProvider crypto_provider = 7; // <- New
  }

  message Local {
    google.protobuf.Any priv_key = 1;
  }

  message Ledger {
    hd.v1.BIP44Params path = 1;
  }

  message Multi {}

  message Offline {}
}
```
##### Creating and loading a `CryptoProvider`
For creating providers, we propose a _factory pattern_ and a _registry_ for these builders. Examples of these
patterns can be found [here](https://github.com/cosmos/crypto/blob/main/docs/architecture/adr-001-crypto-provider.md#illustrative-code-snippets)
##### Keyring
The new `Keyring` interface will serve as a central hub for managing and fetching `CryptoProviders`. To ensure a smoother migration path, the new Keyring will be backward compatible with the previous version. Since this will be the main API from which applications will obtain their `CryptoProvider` instances, the proposal is to extend the Keyring interface to include the methods:
```go
type KeyringV2 interface {
	// methods from Keyring/v1

	// ListCryptoProviders returns a list of all the stored CryptoProvider metadata.
	ListCryptoProviders() ([]ProviderMetadata, error)

	// GetCryptoProvider retrieves a specific CryptoProvider by its id.
	GetCryptoProvider(id string) (CryptoProvider, error)
}
```
_Note_: Methods to obtain a provider from a public key or other means that make it easier to load the desired provider can be added.
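For illustration, fetching a provider from the new keyring and signing with it might look like the sketch below. The `Signer`/`GetSigner`/`Sign` shapes are assumptions taken from ADR-001's illustrative snippets, not a released API, and are declared locally only so the sketch hangs together:

```go
// Assumed minimal shapes from ADR-001's illustrative snippets.
type Signer interface {
	Sign(msg []byte) ([]byte, error)
}

type CryptoProvider interface {
	GetSigner() (Signer, error)
}

// signWithProvider fetches a provider by id from the KeyringV2 interface
// defined above and signs with it; curve-specific details stay behind the
// provider.
func signWithProvider(kr KeyringV2, providerID string, msg []byte) ([]byte, error) {
	provider, err := kr.GetCryptoProvider(providerID)
	if err != nil {
		return nil, err
	}
	signer, err := provider.GetSigner()
	if err != nil {
		return nil, err
	}
	return signer.Sign(msg)
}
```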
##### Special use case: remote signers
It's important to note that the `CryptoProvider` interface is versatile enough to be implemented as a remote signer. This capability allows for the integration of remote cryptographic operations, which can be particularly useful in distributed or cloud-based environments where local cryptographic resources are limited or need to be managed centrally.
## Alternatives
It is important to note that all the code presented in this document is not in its final form and could be subject to changes at the time of implementation. The examples and implementations discussed should be interpreted as alternatives, providing a conceptual framework rather than definitive solutions. This flexibility allows for adjustments based on further insights, technical evaluations, or changing requirements as development progresses.
## Decision
We will:
- Leverage crypto providers
- Refactor the module structure as described above.
- Define types and interfaces as per the attached code.
- Refactor existing code into the new structure and interfaces.
- Implement Unit Tests to ensure no backward compatibility issues.
## Consequences
### Impact on the SDK codebase
We can divide the impact of this ADR into two main categories: state machine code and client related code.
#### Client
The major impact will be on the client side, where the current `Keyring` interface will be replaced by the new `KeyringV2` interface. At first, the impact will be low since `CryptoProvider` is an optional field in the `Record` message, so there's no mandatory requirement for migrating to this new concept right away. This allows a progressive transition where the risks of breaking changes or regressions are minimized.
#### State Machine
The impact on the state machine code will be minimal. The modules affected (at the time of writing this ADR)
are the `x/accounts` module, specifically its `Authenticate` function, and the `x/auth/ante` module. These will need to be adapted to use a `CryptoProvider` service to make use of the `Verifier` instance.
Worth mentioning that there's also the alternative of using `Verifier` instances in a standalone fashion (see note below).
The specific way to adapt these modules will be deeply analyzed and decided at implementation time of this ADR.
_Note_: All cryptographic tools (hashers, verifiers, signers, etc.) will continue to be available as standalone packages that can be imported and utilized directly without the need for a `CryptoProvider` instance. However, the `CryptoProvider` is the recommended method for using these tools as it offers a more secure way to handle sensitive data, enhanced modularity, and the ability to store configurations and metadata within the `CryptoProvider` definition.
### Backwards Compatibility
The proposed migration path is similar to what the cosmos-sdk has done in the past. To ensure a smooth transition, the following steps will be taken:
Once ADR-001 is implemented with a stable release:
- Deprecate the old crypto package. The old crypto package will still be usable, but it will be marked as deprecated and users can opt to use the new package.
- Migrate the codebase to use the new cosmos/crypto package and remove the old crypto one.
### Positive
- Single place of truth
- Easier to use interfaces
- Easier to extend
- Unit test for each crypto package
- Greater maintainability
- Incentivize addition of implementations instead of forks
- Decoupling behavior from implementation
- Sanitization of code
### Negative
- It will involve an effort to adapt existing code.
- It will require attention to detail and auditing.
### Neutral
- It will involve extensive testing.
## Test Cases
- The code will be unit tested to ensure a high code coverage
- There should be integration tests around Keyring and CryptoProviders.
> While an ADR is in the DRAFT or PROPOSED stage, this section should contain a
> summary of issues to be solved in future iterations (usually referencing comments
> from a pull-request discussion).
>
> Later, this section can optionally list ideas or improvements the author or
> reviewers found during the analysis of this ADR.
# ADR-71 Bank V2
## Status
DRAFT
## Changelog
- 2024-05-08: Initial Draft (@samricotta, @julienrbrt)
## Abstract
The primary objective of refactoring the bank module is to simplify and enhance the functionality of the Cosmos SDK. Over time the bank module has been burdened with numerous responsibilities including transaction handling, account restrictions, delegation counting, and the minting and burning of coins.
In addition to the above, the bank module is currently too rigid and handles too many tasks, so this proposal aims to streamline the module by focusing on core functions `Send`, `Mint`, and `Burn`.
Currently, the module is split across different keepers with scattered and duplicated functionality (with 4 send functions, for instance).
Additionally, the integration of the token factory into the bank module allows for standardization, and better integration within the core modules.
This rewrite will reduce complexity and enhance the efficiency and UX of the bank module.
## Context
The current implementation of the bank module is characterised by its handling of a broad array of functions, leading to significant complexity in using and extending the bank module.
These issues have underscored the need for a refactoring strategy that simplifies the module's architecture and focuses on its most essential operations.
Additionally, there is an overlap in functionality with a Token Factory module, which could be integrated to streamline operations.
## Decision
**Permission Tightening**: Access to the module can be restricted to selected denominations only, ensuring that it operates within designated boundaries and does not exceed its intended scope. Currently, the permissions allow all denoms, so this should be changed. Send restrictions functionality will be maintained.
**Simplification of Logic**: The bank module will focus on core functionalities `Send`, `Mint`, and `Burn`. This refinement aims to streamline the architecture, enhancing both maintainability and performance.
**Integration of Token Factory**: The Token Factory will be merged into the bank module. This consolidation of related functionalities aims to reduce redundancy and enhance coherence within the system. Migration functions will be provided for migrating from Osmosis' Token Factory module to bank/v2.
**Legacy Support**: A legacy wrapper will be implemented to ensure compatibility with about 90% of existing functions. This measure will facilitate a smooth transition while keeping older systems functional.
**Denom Implementation**: An asset interface will be added to standardise interactions such as transfers, balance inquiries, minting, and burning across different tokens. This will allow the bank module to support arbitrary asset types, enabling developers to implement custom, ERC20-like denominations.
For example, currently if a team would like to extend the transfer method, the changes would apply universally, affecting all denoms. The proposed asset interface allows teams to customise or extend the transfer method specifically for their own tokens without impacting others.
These improvements are expected to enhance the flexibility of the bank module, allowing for the creation of custom tokens similar to ERC20 standards and assets backed by CosmWasm (CW) contracts. The integration efforts will also aim to unify CW20 with bank coins across the Cosmos chains.
Example of denom interface:
```go
type AssetInterface interface {
	Transfer(ctx sdk.Context, from sdk.AccAddress, to sdk.AccAddress, amount sdk.Coin) error
	Mint(ctx sdk.Context, to sdk.AccAddress, amount sdk.Coin) error
	Burn(ctx sdk.Context, from sdk.AccAddress, amount sdk.Coin) error
	QueryBalance(ctx sdk.Context, account sdk.AccAddress) (sdk.Coin, error)
}
```
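To illustrate, here is a hedged sketch of a custom denom satisfying this interface. The in-memory balances map and the single-denom assumption are illustrative only; a real asset account would keep balances in its own module or account state:

```go
package assetdemo

import (
	"fmt"

	sdk "github.com/cosmos/cosmos-sdk/types"
)

// DemoAsset is a toy single-denom asset; balances live in memory here
// purely for illustration.
type DemoAsset struct {
	balances map[string]sdk.Coin
}

func (a *DemoAsset) Transfer(ctx sdk.Context, from, to sdk.AccAddress, amount sdk.Coin) error {
	// a transfer is a burn from the sender followed by a mint to the receiver
	if err := a.Burn(ctx, from, amount); err != nil {
		return err
	}
	return a.Mint(ctx, to, amount)
}

func (a *DemoAsset) Mint(ctx sdk.Context, to sdk.AccAddress, amount sdk.Coin) error {
	if cur, ok := a.balances[to.String()]; ok {
		a.balances[to.String()] = cur.Add(amount) // same denom assumed
		return nil
	}
	a.balances[to.String()] = amount
	return nil
}

func (a *DemoAsset) Burn(ctx sdk.Context, from sdk.AccAddress, amount sdk.Coin) error {
	cur, ok := a.balances[from.String()]
	if !ok || cur.IsLT(amount) {
		return fmt.Errorf("insufficient balance for %s", from)
	}
	a.balances[from.String()] = cur.Sub(amount)
	return nil
}

func (a *DemoAsset) QueryBalance(ctx sdk.Context, account sdk.AccAddress) (sdk.Coin, error) {
	cur, ok := a.balances[account.String()]
	if !ok {
		return sdk.Coin{}, fmt.Errorf("no balance for %s", account)
	}
	return cur, nil
}
```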
Overview of flow:
1. Alice initiates a transfer by entering Bob's address and the amount (100 ATOM)
2. The Bank module verifies that the ATOM token implements the `AssetInterface` by querying the `ATOM_Denom_Account`, which is an `x/account` denom account.
3. The Bank module executes the transfer by subtracting 100 ATOM from Alice's balance and adding 100 ATOM to Bob's balance.
4. The Bank module calls the Transfer method on the `ATOM_Denom_Account`. The Transfer method, defined in the `AssetInterface`, handles the logic to subtract 100 ATOM from Alice's balance and add 100 ATOM to Bob's balance.
5. The Bank module updates the chain and returns the new balances.
6. Both Alice and Bob successfully receive the updated balances.
## Migration Plans
Bank is a widely used module, so introducing a v2 needs to be thought through thoroughly. In order to not force all dependencies to immediately migrate to bank/v2, the same _upgrading_ path will be taken as for the `gov` module.
This means `cosmossdk.io/bank` will stay one module and there won't be a new `cosmossdk.io/bank/v2` go module. Instead the bank protos will be versioned from `v1beta1` (current bank) to `v2`.
Bank `v1beta1` endpoints will use the new bank v2 implementation for maximum backward compatibility.
The bank `v1beta1` keepers will be deprecated and potentially eventually removed, but its proto and messages definitions will remain.
Additionally, as bank plans to integrate the token factory, migration functions will be provided to migrate from the Osmosis token factory implementation (the most widely used implementation) to the new bank/v2 token factory.
## Consequences
### Positive
- Simplified interaction with bank APIs
- Backward compatible changes (no contracts or apis broken)
- Optional migration (note: bank `v1beta1` won't get any new feature after bank `v2` release)
### Neutral
- Asset implementation not available cross-chain (IBC-ed custom asset should possibly fallback to the default implementation)
- Many assets may slow down bank balances requests
### Negative
- Temporary duplicate functionality as bank `v1beta1` and `v2` live alongside each other
- Difficulty in ever completely removing bank `v1beta1`
## References
- Current bank module implementation: https://github.com/cosmos/cosmos-sdk/blob/v0.50.6/x/bank/keeper/keeper.go#L22-L53
- Osmosis token factory: https://github.com/osmosis-labs/osmosis/tree/v25.0.0/x/tokenfactory/keeper
# Cosmos SDK Core Components
## Overview
The Cosmos SDK is a framework for building secure blockchain applications on CometBFT. It provides:
- ABCI implementation in Go
- Multi-store persistence layer
- Transaction routing system
## Transaction Flow
1. CometBFT consensus delivers transaction bytes
2. SDK decodes transactions and extracts messages
3. Messages routed to appropriate modules
4. State changes committed to stores
```mermaid
graph TD
A[CometBFT] -->|Tx Bytes| B[SDK Decode]
B -->|Messages| C[Module Router]
C -->|State Changes| D[Multi-store]
```
## `baseapp`
`baseapp` is the boilerplate implementation of a Cosmos SDK application. It comes with an implementation of the ABCI to handle the connection with the underlying consensus engine. Typically, a Cosmos SDK application extends `baseapp` by embedding it in [`app.go`](../beginner/00-app-anatomy.md#core-application-file).
Here is an example of this from `simapp`, the Cosmos SDK demonstration app:
```go reference
https://github.com/cosmos/cosmos-sdk/blob/v0.52.0-beta.1/simapp/app.go#L145-L186
```
The goal of `baseapp` is to provide a secure interface between the store and the extensible state machine while defining as little about the state machine as possible (staying true to the ABCI).
For more on `baseapp`, please click [here](../advanced/00-baseapp.md).
## Multistore
The Cosmos SDK provides a [`multistore`](../advanced/04-store.md#multistore) for persisting state. The multistore allows developers to declare any number of [`KVStores`](../advanced/04-store.md#base-layer-kvstores). These `KVStores` only accept the `[]byte` type as value and therefore any custom structure needs to be marshalled using [a codec](../advanced/05-encoding.md) before being stored.
The multistore abstraction is used to divide the state in distinct compartments, each managed by its own module. For more on the multistore, click [here](../advanced/04-store.md#multistore).
## Modules
The power of the Cosmos SDK lies in its modularity. Cosmos SDK applications are built by aggregating a collection of interoperable modules. Each module defines a subset of the state and contains its own message/transaction processor, while the Cosmos SDK is responsible for routing each message to its respective module.
Here is a simplified view of how a transaction is processed by the application of each full-node when it is received in a valid block:
```mermaid
flowchart TD
A[Transaction relayed from the full-node's CometBFT engine to the node's application via DeliverTx] --> B[APPLICATION]
B -->|"Using baseapp's methods: Decode the Tx, extract and route the message(s)"| C[Message routed to the correct module to be processed]
C --> D1[AUTH MODULE]
C --> D2[BANK MODULE]
C --> D3[STAKING MODULE]
C --> D4[GOV MODULE]
D1 -->|Handle message, Update state| E["Return result to CometBFT (0=Ok, 1=Err)"]
D2 -->|Handle message, Update state| E["Return result to CometBFT (0=Ok, 1=Err)"]
D3 -->|Handle message, Update state| E["Return result to CometBFT (0=Ok, 1=Err)"]
D4 -->|Handle message, Update state| E["Return result to CometBFT (0=Ok, 1=Err)"]
```
Each module can be seen as a little state-machine. Developers need to define the subset of the state handled by the module, as well as custom message types that modify the state (_Note:_ `messages` are extracted from `transactions` by `baseapp`). In general, each module declares its own `KVStore` in the `multistore` to persist the subset of the state it defines. Most developers will need to access other 3rd party modules when building their own modules. Given that the Cosmos SDK is an open framework, some of the modules may be malicious, which means there is a need for security principles to reason about inter-module interactions. These principles are based on [object-capabilities](../advanced/10-ocap.md). In practice, this means that instead of having each module keep an access control list for other modules, each module implements special objects called `keepers` that can be passed to other modules to grant a pre-defined set of capabilities.
Cosmos SDK modules are defined in the `x/` folder of the Cosmos SDK. Some core modules include:
- `x/auth`: Used to manage accounts and signatures.
- `x/bank`: Used to enable tokens and token transfers.
- `x/staking` + `x/slashing`: Used to build Proof-of-Stake blockchains.
In addition to the already existing modules in `x/`, which anyone can use in their app, the Cosmos SDK lets you build your own custom modules. You can check an [example of that in the tutorial](https://tutorials.cosmos.network/).

# Keepers
:::note Synopsis
`Keeper`s refer to a Cosmos SDK abstraction whose role is to manage access to the subset of the state defined by various modules. `Keeper`s are module-specific, i.e. the subset of state defined by a module can only be accessed by a `keeper` defined in said module. If a module needs to access the subset of state defined by another module, a reference to the second module's internal `keeper` needs to be passed to the first one. This is done in `app.go` during the instantiation of module keepers.
:::
:::note Pre-requisite Readings
- [Introduction to Cosmos SDK Modules](./00-intro.md)
:::
## Motivation
The Cosmos SDK is a framework that makes it easy for developers to build complex decentralized applications from scratch, mainly by composing modules together. As the ecosystem of open-source modules for the Cosmos SDK expands, it will become increasingly likely that some of these modules contain vulnerabilities, as a result of the negligence or malice of their developer.
The Cosmos SDK adopts an [object-capabilities-based approach](https://docs.cosmos.network/main/learn/advanced/ocap#ocaps-in-practice) to help developers better protect their application from unwanted inter-module interactions, and `keeper`s are at the core of this approach. A `keeper` can be considered quite literally to be the gatekeeper of a module's store(s). Each store (typically an [`IAVL` Store](../../learn/advanced/04-store.md#iavl-store)) defined within a module comes with a `storeKey`, which grants unlimited access to it. The module's `keeper` holds this `storeKey` (which should otherwise remain unexposed), and defines [methods](#implementing-methods) for reading and writing to the store(s).
The core idea behind the object-capabilities approach is to only reveal what is necessary to get the work done. In practice, this means that instead of handling permissions of modules through access-control lists, module `keeper`s are passed a reference to the specific instance of the other modules' `keeper`s that they need to access (this is done in the [application's constructor function](../../learn/beginner/00-app-anatomy.md#constructor-function)). As a consequence, a module can only interact with the subset of state defined in another module via the methods exposed by the instance of the other module's `keeper`. This is a great way for developers to control the interactions that their own module can have with modules developed by external developers.
## Type Definition
`keeper`s are generally implemented in a `/keeper/keeper.go` file located in the module's folder. By convention, the type `keeper` of a module is simply named `Keeper` and usually follows this structure:
```go
type Keeper struct {
	// External keepers, if any
	// Store key(s)
	// codec
	// authority
}
```
For example, here is the type definition of the `keeper` from the `staking` module:
```go reference
https://github.com/cosmos/cosmos-sdk/blob/v0.52.0-beta.1/x/staking/keeper/keeper.go#L54-L115
```
Let us go through the different parameters:
- An expected `keeper` is a `keeper` external to a module that is required by the internal `keeper` of said module. External `keeper`s are listed in the internal `keeper`'s type definition as interfaces. These interfaces are themselves defined in an `expected_keepers.go` file in the root of the module's folder. In this context, interfaces are used to reduce the number of dependencies, as well as to facilitate the maintenance of the module itself.
- `KVStoreService`s grant access to the store(s) of the [multistore](../../learn/advanced/04-store.md) managed by the module. They should always remain unexposed to external modules.
- `cdc` is the [codec](../../learn/advanced/05-encoding.md) used to marshal and unmarshal structs to/from `[]byte`. The `cdc` can be any of `codec.BinaryCodec`, `codec.JSONCodec` or `codec.Codec` based on your requirements. It can be either a proto or amino codec as long as they implement these interfaces.
- The authority listed is a module account or user account that has the right to change module level parameters. Previously this was handled by the param module, which has been deprecated.
Of course, it is possible to define different types of internal `keeper`s for the same module (e.g. a read-only `keeper`). Each type of `keeper` comes with its own constructor function, which is called from the [application's constructor function](../../learn/beginner/00-app-anatomy.md). This is where `keeper`s are instantiated, and where developers make sure to pass correct instances of modules' `keeper`s to other modules that require them.
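Putting those parameters together, a minimal keeper along these lines might look as follows. The module and the `BankKeeper` interface are illustrative; `store.KVStoreService` and `codec.BinaryCodec` are the real SDK types discussed above:

```go
package keeper

import (
	"cosmossdk.io/core/store"

	"github.com/cosmos/cosmos-sdk/codec"
)

// BankKeeper is a hypothetical expected keeper, normally declared in the
// module's expected_keepers.go file.
type BankKeeper interface{}

type Keeper struct {
	storeService store.KVStoreService // store access; never exposed to other modules
	cdc          codec.BinaryCodec    // marshals structs to/from []byte
	authority    string               // account allowed to update module parameters

	bankKeeper BankKeeper // external keeper received as an interface
}

// NewKeeper is called from the application's constructor function, where the
// correct keeper instances are wired together.
func NewKeeper(ss store.KVStoreService, cdc codec.BinaryCodec, authority string, bk BankKeeper) Keeper {
	return Keeper{storeService: ss, cdc: cdc, authority: authority, bankKeeper: bk}
}
```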
## Implementing Methods
`Keeper`s primarily expose methods for business logic, as validity checks should have already been performed by the [`Msg` server](./03-msg-services.md) when `keeper`s' methods are called.
<!-- markdown-link-check-disable -->
State management is recommended to be done via [Collections](../packages/collections)
<!-- The above link is created via the script to generate docs -->
## State Management
In the Cosmos SDK, it is crucial to be methodical and selective when managing state within a module, as improper state management can lead to inefficiency, security risks, and scalability issues. Not all data belongs in the on-chain state; it's important to store only essential blockchain data that needs to be verified by consensus. Storing unnecessary information, especially client-side data, can bloat the state and slow down performance. Instead, developers should focus on using an off-chain database to handle supplementary data, extending the API as needed. This approach minimizes on-chain complexity, optimizes resource usage, and keeps the blockchain state lean and efficient, ensuring scalability and smooth operations.
The Cosmos SDK leverages Protocol Buffers (protobuf) for efficient state management, providing a well-structured, binary encoding format that ensures compatibility and performance across different modules. The SDK's recommended approach for managing state is through the [collections package](../packages/02-collections.md), which simplifies state handling by offering predefined data structures like maps and indexed sets, reducing the complexity of managing raw state data. While users can opt for custom encoding schemes if they need more flexibility or have specialized requirements, they should be aware that such custom implementations may not integrate seamlessly with indexers that decode state data on the fly. This could lead to challenges in data retrieval, querying, and interoperability, making protobuf a safer and more future-proof choice for most use cases.
# Folder Structure
:::note Synopsis
This document outlines the structure of Cosmos SDK modules. These ideas are meant to be applied as suggestions. Application developers are encouraged to improve upon and contribute to module structure and development design.
The required interface for a module is located in the module.go. Everything beyond this is suggestive.
:::
## Structure
A typical Cosmos SDK module can be structured as follows:
```shell
proto
└── {project_name}
    └── {module_name}
        └── {proto_version}
            ├── {module_name}.proto
            ├── genesis.proto
            ├── query.proto
            └── tx.proto
```
- `{module_name}.proto`: The module's common message type definitions.
- `genesis.proto`: The module's message type definitions related to genesis state.
- `query.proto`: The module's Query service and related message type definitions.
- `tx.proto`: The module's Msg service and related message type definitions.
```shell
x/{module_name}
├── client
│   ├── cli
│   │   ├── query.go
│   │   └── tx.go
│   └── testutil
│       ├── cli_test.go
│       └── suite.go
├── exported
│   └── exported.go
├── keeper
│   ├── genesis.go
│   ├── grpc_query.go
│   ├── hooks.go
│   ├── invariants.go
│   ├── keeper.go
│   ├── keys.go
│   ├── msg_server.go
│   └── querier.go
├── simulation
│   ├── decoder.go
│   ├── genesis.go
│   ├── operations.go
│   └── params.go
├── types
│   ├── {module_name}.pb.go
│   ├── codec.go
│   ├── errors.go
│   ├── events.go
│   ├── events.pb.go
│   ├── expected_keepers.go
│   ├── genesis.go
│   ├── genesis.pb.go
│   ├── keys.go
│   ├── msgs.go
│   ├── params.go
│   ├── query.pb.go
│   └── tx.pb.go
├── module.go
├── abci.go
├── autocli.go
├── depinject.go
└── README.md
```
- `client/`: The module's CLI client functionality implementation and the module's CLI testing suite.
- `exported/`: The module's exported types - typically interface types. If a module relies on keepers from another module, it is expected to receive the keepers as interface contracts through the `expected_keepers.go` file (see below) in order to avoid a direct dependency on the module implementing the keepers. However, these interface contracts can define methods that operate on and/or return types that are specific to the module that is implementing the keepers and this is where `exported/` comes into play. The interface types that are defined in `exported/` use canonical types, allowing for the module to receive the keepers as interface contracts through the `expected_keepers.go` file. This pattern allows for code to remain DRY and also alleviates import cycle chaos.
- `keeper/`: The module's `Keeper` and `MsgServer` implementation.
- `abci.go`: The module's `BeginBlocker` and `EndBlocker` implementations (this file is only required if `BeginBlocker` and/or `EndBlocker` need to be defined).
- `simulation/`: The module's [simulation](./14-simulator.md) package defines functions used by the blockchain simulator application (`simapp`).
- `README.md`: The module's specification documents outlining important concepts, state storage structure, and message and event type definitions. Learn more how to write module specs in the [spec guidelines](../spec/SPEC_MODULE.md).
- `types/`: includes type definitions for messages, events, and genesis state, including the type definitions generated by Protocol Buffers.
- `codec.go`: The module's registry methods for interface types.
- `errors.go`: The module's sentinel errors.
- `events.go`: The module's event types and constructors.
- `expected_keepers.go`: The module's [expected keeper](./06-keeper.md#type-definition) interfaces.
- `genesis.go`: The module's genesis state methods and helper functions.
- `keys.go`: The module's store keys and associated helper functions.
- `msgs.go`: The module's message type definitions and associated methods.
- `params.go`: The module's parameter type definitions and associated methods.
- `*.pb.go`: The module's type definitions generated by Protocol Buffers (as defined in the respective `*.proto` files above).
- The root directory includes the module's `AppModule` implementation.
- `autocli.go`: The module [autocli](https://docs.cosmos.network/main/core/autocli) options.
- `depinject.go`: The module [depinject](./15-depinject.md#type-definition) options.
> Note: although the above pattern is followed by most of the Cosmos SDK modules, there are some modules that don't follow this pattern. E.g. `x/group` and `x/nft` don't have a `types` folder; instead, all of the type definitions for messages, events, and genesis state live in the root directory, and the module's `AppModule` implementation lives in the `module` folder.
# `Msg` Services
:::note Synopsis
A Protobuf `Msg` service processes [messages](./02-messages-and-queries.md#messages). Protobuf `Msg` services are specific to the module in which they are defined, and only process messages defined within the said module. They are called from `BaseApp` during [`FinalizeBlock`](../../learn/advanced/00-baseapp.md#finalizeblock).
:::
:::note Pre-requisite Readings
- [Module Manager](./01-module-manager.md)
- [Messages and Queries](./02-messages-and-queries.md)
:::
## Implementation of a module `Msg` service
Each module should define a Protobuf `Msg` service, which will be responsible for processing requests (implementing `sdk.Msg`) and returning responses.
As further described in [ADR 031](../architecture/adr-031-msg-service.md), this approach has the advantage of clearly specifying return types and generating server and client code.
Protobuf generates a `MsgServer` interface based on the definition of the `Msg` service. It is the role of the module developer to implement this interface, by implementing the state transition logic that should happen upon receipt of each `transaction.Msg`. As an example, here is the generated `MsgServer` interface for `x/bank`, which exposes two `transaction.Msg`s:
```go reference
https://github.com/cosmos/cosmos-sdk/blob/28fa3b8/x/bank/types/tx.pb.go#L564-L579
```
When possible, the existing module's [`Keeper`](./06-keeper.md) should implement `MsgServer`, otherwise a `msgServer` struct that embeds the `Keeper` can be created, typically in `./keeper/msg_server.go`:
```go reference
https://github.com/cosmos/cosmos-sdk/blob/28fa3b8/x/bank/keeper/msg_server.go#L16-L19
```
`msgServer` methods can retrieve auxiliary information or services via the `Environment` struct, which is always located in the keeper:
Environment:
```go reference
https://github.com/cosmos/cosmos-sdk/blob/07151304e2ec6a185243d083f59a2d543253cb15/core/appmodule/v2/environment.go#L14-L29
```
Keeper Example:
```go reference
https://github.com/cosmos/cosmos-sdk/blob/07151304e2ec6a185243d083f59a2d543253cb15/x/bank/keeper/keeper.go#L56-L58
```
`transaction.Msg` processing usually follows these 3 steps:
### Validation
The message server must perform all validation required (both _stateful_ and _stateless_) to make sure the `message` is valid.
The `signer` is charged for the gas cost of this validation.
For example, a `msgServer` method for a `transfer` message should check that the sending account has enough funds to actually perform the transfer.
It is recommended to implement all validation checks in a separate function that passes state values as arguments. This implementation simplifies testing. As expected, expensive validation functions charge additional gas. Example:
```go
func ValidateMsgA(msg MsgA, now time.Time, gm GasMeter) error {
	if now.Before(msg.Expire) {
		return sdkerrors.ErrInvalidRequest.Wrap("msg expired")
	}
	gm.ConsumeGas(1000, "signature verification")
	return signatureVerification(msg.Prover, msg.Data)
}
```
:::warning
Previously, the `ValidateBasic` method was used to perform simple and stateless validation checks.
This way of validating is deprecated; this means the `msgServer` must perform all validation checks.
:::
### State Transition
After the validation is successful, the `msgServer` method uses the [`keeper`](./06-keeper.md) functions to access the state and perform a state transition.
### Events
Before returning, `msgServer` methods generally emit one or more [events](../../learn/advanced/08-events.md) by using the `EventManager` held in `environment`.
There are two ways to emit events: typed events using protobuf, or arbitrary key-value pairs.
Typed Events:
```go
ctx.EventManager().EmitTypedEvent(
	&group.EventABC{Key1: Value1, Key2: Value2})
```
Arbitrary Events:
```go
ctx.EventManager().EmitEvent(
	sdk.NewEvent(
		eventType, // e.g. sdk.EventTypeMessage for a message, types.CustomEventType for a custom event defined in the module
		sdk.NewAttribute(key1, value1),
		sdk.NewAttribute(key2, value2),
	),
)
```
These events are relayed back to the underlying consensus engine and can be used by service providers to implement services around the application. Click [here](../../learn/advanced/08-events.md) to learn more about events.
The invoked `msgServer` method returns a `proto.Message` response and an `error`. These return values are then wrapped into an `*sdk.Result` or an `error`:
```go reference
https://github.com/cosmos/cosmos-sdk/blob/v0.50.0-alpha.0/baseapp/msg_service_router.go#L160
```
This method takes care of marshaling the `res` parameter to protobuf and attaching any events on the `EventManager()` to the `sdk.Result`.
```protobuf reference
https://github.com/cosmos/cosmos-sdk/blob/v0.50.0-alpha.0/proto/cosmos/base/abci/v1beta1/abci.proto#L93-L113
```
This diagram shows a typical structure of a Protobuf `Msg` service, and how the message propagates through the module.
```mermaid
sequenceDiagram
participant User
participant baseApp
participant router
participant handler
participant msgServer
participant keeper
participant EventManager
User->>baseApp: Transaction Type<Tx>
baseApp->>router: Route(ctx, msgRoute)
router->>handler: handler
handler->>msgServer: Msg<Tx>(Context, Msg(..))
alt addresses invalid, denominations wrong, etc.
msgServer->>handler: error
handler->>router: error
router->>baseApp: result, error code
else
msgServer->>keeper: perform action, update context
keeper->>msgServer: results, error code
msgServer->>EventManager: Emit relevant events
msgServer->>msgServer: maybe wrap results in more structure
msgServer->>handler: result, error code
handler->>router: result, error code
router->>baseApp: result, error code
end
baseApp->>User: result, error code
```
## Telemetry
New [telemetry metrics](../../learn/advanced/09-telemetry.md) can be created from `msgServer` methods when handling messages.
This is an example from the `x/auth/vesting` module:
```go reference
https://github.com/cosmos/cosmos-sdk/blob/v0.50.0-alpha.0/x/auth/vesting/msg_server.go#L76-L88
```
:::warning
Telemetry adds a performance overhead to the chain. It is recommended to only use this in critical paths.
:::
# Query Services
:::note Synopsis
A Protobuf Query service processes [`queries`](./02-messages-and-queries.md#queries). Query services are specific to the module in which they are defined, and only process `queries` defined within said module. They are called from `BaseApp`'s [`Query` method](../../learn/advanced/00-baseapp.md#query).
:::
:::note Pre-requisite Readings
- [Module Manager](./01-module-manager.md)
- [Messages and Queries](./02-messages-and-queries.md)
:::
## Implementation of a module query service
### gRPC Service
When defining a Protobuf `Query` service, a `QueryServer` interface is generated for each module with all the service methods:
```go
type QueryServer interface {
	QueryBalance(context.Context, *QueryBalanceParams) (*types.Coin, error)
	QueryAllBalances(context.Context, *QueryAllBalancesParams) (*QueryAllBalancesResponse, error)
}
```
These custom query methods should be implemented by a module's keeper, typically in `./keeper/grpc_query.go`. The first parameter of these methods is a generic `context.Context`, so the Cosmos SDK provides the function `sdk.UnwrapSDKContext` to retrieve the `sdk.Context` from the provided `context.Context`.
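As a minimal sketch, a keeper-side implementation might look like the following (the `Keeper`, request/response types, and `GetBalance` helper are assumptions for illustration, not the actual bank module API):
```go
import (
	"context"

	sdk "github.com/cosmos/cosmos-sdk/types"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// QueryBalance is an illustrative QueryServer method on a module keeper.
// The request/response types and GetBalance helper are assumed for this sketch.
func (k Keeper) QueryBalance(goCtx context.Context, req *QueryBalanceParams) (*types.Coin, error) {
	if req == nil {
		return nil, status.Error(codes.InvalidArgument, "empty request")
	}
	ctx := sdk.UnwrapSDKContext(goCtx) // recover the sdk.Context from the generic context.Context
	balance := k.GetBalance(ctx, req.Address, req.Denom)
	return &balance, nil
}
```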
Here's an example implementation for the bank module:
```go reference
https://github.com/cosmos/cosmos-sdk/blob/v0.50.0-alpha.0/x/bank/keeper/grpc_query.go
```
### Calling queries from the State Machine
The Cosmos SDK v0.47 introduces a new `cosmos.query.v1.module_query_safe` Protobuf annotation which is used to state that a query is safe to be called from within the state machine, for example:
- a Keeper's query function can be called from another module's Keeper,
- ADR-033 intermodule query calls,
- CosmWasm contracts can also directly interact with these queries.
If the `module_query_safe` annotation is set to `true`, it means:
- The query is deterministic: given a block height it will return the same response upon multiple calls, and doesn't introduce any state-machine breaking changes across SDK patch versions.
- Gas consumption never fluctuates across calls and across patch versions.
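For reference, the annotation is applied on the RPC definition in the module's `query.proto`; a minimal sketch (service and method names are illustrative):
```protobuf
import "cosmos/query/v1/query.proto";

service Query {
  // Balance is marked module_query_safe, so it may safely be called
  // from another module's Keeper or from a CosmWasm contract.
  rpc Balance(QueryBalanceRequest) returns (QueryBalanceResponse) {
    option (cosmos.query.v1.module_query_safe) = true;
  }
}
```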
If you are a module developer and want to use the `module_query_safe` annotation for your own query, you have to ensure the following:
- the query is deterministic and won't introduce state-machine-breaking changes without coordinated upgrades,
- its gas is tracked, to avoid the attack vector where no gas is accounted for on potentially high-computation queries.
---
sidebar_position: 1
---
# Blockchain Architecture
## Introduction
Blockchain architecture is a complex topic that involves many different components. In this section, we will cover the main layers of a blockchain application built with the Cosmos SDK.
At its core, a blockchain is a replicated deterministic state machine. This document explores the various layers of blockchain architecture, focusing on the execution, settlement, consensus, data availability, and interoperability layers.
```mermaid
graph TD
A[Modular SDK Blockchain Architecture]
A --> B[Execution Layer]
A --> C[Settlement Layer]
A --> D[Consensus Layer]
D --> E[Data Availability Layer]
A --> F[Interoperability Layer]
```
## Layered Architecture
Understanding blockchain architecture through the lens of different layers helps in comprehending its complex functionalities. We will give a high-level overview of the execution layer, settlement layer, consensus layer, data availability layer, and interoperability layer.
## Execution Layer
The Execution Layer is where the blockchain processes and executes transactions. The state machine within the blockchain handles the execution of transaction logic. This is done by the blockchain itself, ensuring that every transaction follows the predefined rules and state transitions. When a transaction is submitted, the execution layer processes it, updates the state, and ensures that the output is deterministic and consistent across all nodes. In the context of the Cosmos SDK, this typically involves predefined modules and transaction types rather than general-purpose smart contracts, which are used in chains with CosmWasm.
### State machine
At its core, a blockchain is a [replicated deterministic state machine](https://en.wikipedia.org/wiki/State_machine_replication).
A state machine is a computer science concept whereby a machine can have multiple states, but only one at any given time. There is a `state`, which describes the current state of the system, and `transactions`, that trigger state transitions.
Given a state S and a transaction T, the state machine will return a new state S'.
```mermaid
flowchart LR
A[S]
B[S']
A -->|"apply(T)"| B
```
In practice, the transactions are bundled in blocks to make the process more efficient. Given a state S and a block of transactions B, the state machine will return a new state S'.
```mermaid
flowchart LR
A[S]
B[S']
A -->|"For each T in B: apply(T)"| B
```
In a blockchain context, the state machine is [deterministic](https://en.wikipedia.org/wiki/Deterministic_system). This means that if a node is started at a given state and replays the same sequence of transactions, it will always end up with the same final state.
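To make determinism concrete, here is a toy state machine in Go (the state and transaction shapes are invented for illustration):
```go
// State is a toy application state: balances keyed by address.
type State map[string]int64

// Tx is a toy transaction moving Amount from one address to another.
type Tx struct {
	From, To string
	Amount   int64
}

// Apply returns the new state S' given state S and transaction T.
// It is deterministic: replaying the same transactions from the same
// genesis state always yields the same final state.
func Apply(s State, t Tx) State {
	next := make(State, len(s))
	for k, v := range s {
		next[k] = v
	}
	if next[t.From] >= t.Amount { // reject overdrafts deterministically
		next[t.From] -= t.Amount
		next[t.To] += t.Amount
	}
	return next
}
```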
The Cosmos SDK gives developers maximum flexibility to define the state of their application, transaction types and state transition functions. The process of building state machines with the Cosmos SDK will be described more in-depth in the following sections. But first, let us see how the state machine is replicated using various consensus engines, such as CometBFT.
## Settlement Layer
The Settlement Layer is responsible for finalising and recording transactions on the blockchain. This layer ensures that all transactions are accurately settled and immutable, providing a verifiable record of all activities on the blockchain. It is critical for maintaining the integrity and trustworthiness of the blockchain.
The settlement layer can be performed on the chain itself, or it can be externalised, allowing a different settlement layer to be plugged in as needed. For example, if we were to use Rollkit and Celestia for our data availability and consensus, we could separate our settlement layer by introducing fraud or validity proofs. From there the settlement layer can create trust-minimised light clients, further enhancing security and efficiency. This process ensures that all transactions are accurately finalised and immutable, providing a verifiable record of all activities.
## Consensus Layer
The Consensus Layer ensures that all nodes in the network agree on the order and validity of transactions. This layer uses consensus algorithms like Byzantine Fault Tolerance (BFT) or Proof of Stake (PoS) to achieve agreement, even in the presence of malicious nodes. Consensus is crucial for maintaining the security and reliability of the blockchain.
CometBFT has long been the default consensus engine in the Cosmos SDK. In the most recent releases we have been moving away from this, allowing users to plug and play their own consensus engines. This is a big step forward for the Cosmos SDK as it allows for more flexibility and customisation. Other consensus engine options include, for example, Rollkit with Celestia's Data Availability Layer.
Here is an example of how the consensus layer works with CometBFT in the context of the Cosmos SDK:
### CometBFT
Thanks to the Cosmos SDK, developers just have to define the state machine, and [_CometBFT_](https://docs.cometbft.com/v1.0/explanation/introduction/) will handle replication over the network for them.
```mermaid
flowchart TD
subgraph Blockchain_Node[Blockchain Node]
subgraph SM[State-machine]
direction TB
SM1[Cosmos SDK]
end
subgraph CometBFT[CometBFT]
direction TB
Consensus
Networking
end
end
SM <--> CometBFT
Blockchain_Node -->|Includes| SM
Blockchain_Node -->|Includes| CometBFT
```
[CometBFT](https://docs.cometbft.com/v1.0/explanation/introduction/) is an application-agnostic engine that is responsible for handling the _networking_ and _consensus_ layers of a blockchain. In practice, this means that CometBFT is responsible for propagating and ordering transaction bytes. CometBFT relies on an eponymous Byzantine-Fault-Tolerant (BFT) algorithm to reach consensus on the order of transactions.
The [consensus algorithm adopted by CometBFT](https://docs.cometbft.com/v1.0/explanation/introduction/#consensus-overview) works with a set of special nodes called _Validators_. Validators are responsible for adding blocks of transactions to the blockchain. At any given block, there is a validator set V. A validator in V is chosen by the algorithm to be the proposer of the next block. This block is considered valid if more than two thirds of V signed a `prevote` and a `precommit` on it, and if all the transactions that it contains are valid. The validator set can be changed by rules written in the state-machine.
## ABCI
CometBFT passes transactions to the application through an interface called the [ABCI](https://docs.cometbft.com/v1.0/spec/abci/), which the application must implement.
```mermaid
graph TD
A[Application]
B[CometBFT]
A <-->|ABCI| B
```
Note that **CometBFT only handles transaction bytes**. It has no knowledge of what these bytes mean. All CometBFT does is order these transaction bytes deterministically. CometBFT passes the bytes to the application via the ABCI, and expects a return code to inform it if the messages contained in the transactions were successfully processed or not.
Here are the most important messages of the ABCI:
- `CheckTx`: When a transaction is received by CometBFT, it is passed to the application to check if a few basic requirements are met. `CheckTx` is used to protect the mempool of full-nodes against spam transactions. A special handler called the [`AnteHandler`](../beginner/04-gas-fees.md#antehandler) is used to execute a series of validation steps such as checking for sufficient fees and validating the signatures. If the checks are valid, the transaction is added to the [mempool](https://docs.cometbft.com/v1.0/explanation/core/mempool) and relayed to peer nodes. Note that transactions are not processed (i.e. no modification of the state occurs) with `CheckTx` since they have not been included in a block yet.
- `DeliverTx`: When a [valid block](https://docs.cometbft.com/v1.0/spec/core/data_structures#block) is received by CometBFT, each transaction in the block is passed to the application via `DeliverTx` in order to be processed. It is during this stage that the state transitions occur. The `AnteHandler` executes again, along with the actual [`Msg` service](../../build/building-modules/03-msg-services.md) RPC for each message in the transaction.
- `BeginBlock`/`EndBlock`: These messages are executed at the beginning and the end of each block, whether the block contains transactions or not. They are useful for triggering automatic execution of logic. Proceed with caution though, as computationally expensive loops could slow down your blockchain, or even freeze it if the loop is infinite.
Find a more detailed view of the ABCI methods from the [CometBFT docs](https://docs.cometbft.com/v1.0/spec/abci/).
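As a rough sketch of the shape of this interface (simplified; the real definitions live in CometBFT's `abci/types` package, and newer CometBFT versions consolidate `BeginBlock`/`DeliverTx`/`EndBlock` into `FinalizeBlock`):
```go
// Application is a simplified sketch of the ABCI methods discussed above,
// not the real interface.
type Application interface {
	// CheckTx gatekeeps the mempool: basic validation only, no state writes.
	CheckTx(tx []byte) (code uint32)
	// DeliverTx executes a transaction included in a block;
	// this is where state transitions occur.
	DeliverTx(tx []byte) (code uint32)
	// BeginBlock and EndBlock run at block boundaries,
	// whether or not the block contains transactions.
	BeginBlock(height int64)
	EndBlock(height int64)
}
```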
Any application built on CometBFT needs to implement the ABCI interface in order to communicate with the underlying local CometBFT engine. Fortunately, you do not have to implement the ABCI interface. The Cosmos SDK provides a boilerplate implementation of it in the form of [baseapp](./03-sdk-design.md#baseapp).
## Data Availability Layer
The Data Availability (DA) Layer is a critical component within the umbrella of the consensus layer that ensures all necessary data for transactions is available to all network participants. This layer is essential for preventing data withholding attacks, where some nodes might attempt to disrupt the network by not sharing critical transaction data.
If we use the example of Rollkit, a user initiates a transaction, which is then propagated through the rollup network by a light node. The transaction is validated by full nodes and aggregated into a block by the sequencer. This block is posted to a data availability layer like Celestia, ensuring the data is accessible and correctly ordered. The rollup light node verifies data availability from the DA layer. Full nodes then validate the block and generate necessary proofs, such as fraud proofs for optimistic rollups or zk-SNARKs/zk-STARKs for zk-rollups. These proofs are shared across the network and verified by other nodes, ensuring the rollup's integrity. Once all validations are complete, the rollup's state is updated, finalising the transaction.
## Interoperability Layer
The Interoperability Layer enables communication and interaction between different blockchains. This layer facilitates cross-chain transactions and data sharing, allowing various blockchain networks to interoperate seamlessly. Interoperability is key for building a connected ecosystem of blockchains, enhancing their functionality and reach.
Here we have separated the layers even further to illustrate the components that make up the blockchain architecture. It is important to note that the Cosmos SDK is designed to be interoperable with other blockchains. This is achieved through the use of the [Inter-Blockchain Communication (IBC) protocol](https://www.ibcprotocol.dev/), which allows different blockchains to communicate and transfer assets between each other.
---
sidebar_position: 1
---
# Application-Specific Blockchains
:::note Synopsis
This document explains what application-specific blockchains are, and why developers would want to build one as opposed to writing Smart Contracts.
:::
## What are application-specific blockchains
Application-specific blockchains are blockchains customized to operate a single application. Instead of building a decentralized application on top of an underlying blockchain like Ethereum, developers build their own blockchain from the ground up. This means building a full-node client, a light-client, and all the necessary interfaces (CLI, REST, ...) to interact with the nodes.
```mermaid
flowchart TD
subgraph Blockchain_Node[Blockchain Node]
subgraph SM[State-machine]
direction TB
SM1[Cosmos SDK]
end
subgraph Consensus[Consensus]
direction TB
end
subgraph Networking[Networking]
direction TB
end
end
SM <--> Consensus
Consensus <--> Networking
Blockchain_Node -->|Includes| SM
Blockchain_Node -->|Includes| Consensus
Blockchain_Node -->|Includes| Networking
```
## What are the shortcomings of Smart Contracts
Virtual-machine blockchains like Ethereum addressed the demand for more programmability back in 2014. At the time, the options available for building decentralized applications were quite limited. Most developers would build on top of the complex and limited Bitcoin scripting language, or fork the Bitcoin codebase which was hard to work with and customize.
Virtual-machine blockchains came in with a new value proposition. Their state-machine incorporates a virtual-machine that is able to interpret Turing-complete programs called Smart Contracts. These Smart Contracts are very good for use cases like one-time events (e.g. ICOs), but they can fall short for building complex decentralized platforms. Here is why:
- Smart Contracts are generally developed with specific programming languages that can be interpreted by the underlying virtual-machine. These programming languages are often immature and inherently limited by the constraints of the virtual-machine itself. For example, the Ethereum Virtual Machine does not allow developers to implement automatic execution of code. Developers are also limited to the account-based system of the EVM, and they can only choose from a limited set of functions for their cryptographic operations. These are examples, but they hint at the lack of **flexibility** that a smart contract environment often entails.
- Smart Contracts are all run by the same virtual machine. This means that they compete for resources, which can severely restrain **performance**. And even if the state-machine were to be split in multiple subsets (e.g. via sharding), Smart Contracts would still need to be interpreted by a virtual machine, which would limit performance compared to a native application implemented at state-machine level (our benchmarks show an improvement on the order of 10x in performance when the virtual-machine is removed).
- Another issue with the fact that Smart Contracts share the same underlying environment is the resulting limitation in **sovereignty**. A decentralized application is an ecosystem that involves multiple players. If the application is built on a general-purpose virtual-machine blockchain, stakeholders have very limited sovereignty over their application, and are ultimately superseded by the governance of the underlying blockchain. If there is a bug in the application, very little can be done about it.
Application-Specific Blockchains are designed to address these shortcomings.
## Application-Specific Blockchains Benefits
### Flexibility
Application-specific blockchains give maximum flexibility to developers:
- In Cosmos blockchains, the state-machine is typically connected to the underlying consensus engine via an interface called the [ABCI](https://docs.cometbft.com/v1.0/spec/abci/) (Application Blockchain Interface). This interface can be wrapped in any programming language, meaning developers can build their state-machine in the programming language of their choice.
- Developers can choose among multiple frameworks to build their state-machine. The most widely used today is the Cosmos SDK, but others exist (e.g. [Lotion](https://github.com/nomic-io/lotion), [Weave](https://github.com/iov-one/weave), ...). Typically the choice will be made based on the programming language they want to use (Cosmos SDK and Weave are in Golang, Lotion is in JavaScript, ...).
- The ABCI also allows developers to swap the consensus engine of their application-specific blockchain. Today, only CometBFT is production-ready, but in the future other consensus engines are expected to emerge.
- Even when they settle for a framework and consensus engine, developers still have the freedom to tweak them if they don't perfectly match their requirements in their pristine forms.
- Developers are free to explore the full spectrum of tradeoffs (e.g. number of validators vs transaction throughput, safety vs availability in asynchrony, ...) and design choices (DB or IAVL tree for storage, UTXO or account model, ...).
- Developers can implement automatic execution of code. In the Cosmos SDK, logic can be automatically triggered at the beginning and the end of each block. They are also free to choose the cryptographic library used in their application, as opposed to being constrained by what is made available by the underlying environment in the case of virtual-machine blockchains.
The list above contains a few examples that show how much flexibility application-specific blockchains give to developers. The goal of Cosmos and the Cosmos SDK is to make developer tooling as generic and composable as possible, so that each part of the stack can be forked, tweaked and improved without losing compatibility. As the community grows, more alternatives for each of the core building blocks will emerge, giving more options to developers.
### Performance
Decentralized applications built with Smart Contracts are inherently capped in performance by the underlying environment. For a decentralized application to optimise performance, it needs to be built as an application-specific blockchain. Next are some of the benefits an application-specific blockchain brings in terms of performance:
- Developers of application-specific blockchains can choose to operate with a novel consensus engine such as CometBFT.
- An application-specific blockchain only operates a single application, so that the application does not compete with others for computation and storage. This is the opposite of most non-sharded virtual-machine blockchains today, where smart contracts all compete for computation and storage.
- Even if a virtual-machine blockchain offered application-based sharding coupled with an efficient consensus algorithm, performance would still be limited by the virtual-machine itself. The real throughput bottleneck is the state-machine, and requiring transactions to be interpreted by a virtual-machine significantly increases the computational complexity of processing them.
### Security
Security is hard to quantify, and greatly varies from platform to platform. That said here are some important benefits an application-specific blockchain can bring in terms of security:
- Developers can choose proven programming languages like Go when building their application-specific blockchains, as opposed to smart contract programming languages that are often more immature.
- Developers are not constrained by the cryptographic functions made available by the underlying virtual-machines. They can use their own custom cryptography, and rely on well-audited crypto libraries.
- Developers do not have to worry about potential bugs or exploitable mechanisms in the underlying virtual-machine, making it easier to reason about the security of the application.
### Sovereignty
One of the major benefits of application-specific blockchains is sovereignty. A decentralized application is an ecosystem that involves many actors: users, developers, third-party services, and more. When developers build on a virtual-machine blockchain where many decentralized applications coexist, the community of the application is different from the community of the underlying blockchain, and the latter supersedes the former in the governance process. If there is a bug or if a new feature is needed, stakeholders of the application have very little leeway to upgrade the code. If the community of the underlying blockchain refuses to act, nothing can happen.
The fundamental issue here is that the governance of the application and the governance of the network are not aligned. This issue is solved by application-specific blockchains. Because application-specific blockchains specialize to operate a single application, stakeholders of the application have full control over the entire chain. This ensures that the community will not be stuck if a bug is discovered, and that it has the freedom to choose how it is going to evolve.
---
# Interchain Accounts
:::note Synopsis
Learn about what the Interchain Accounts module is
:::
## What is the Interchain Accounts module?
Interchain Accounts is the Cosmos SDK implementation of the ICS-27 protocol, which enables cross-chain account management built upon IBC.
- How does an interchain account differ from a regular account?
Regular accounts use a private key to sign transactions. Interchain Accounts are instead controlled programmatically by counterparty chains via IBC packets.
## Concepts
`Host Chain`: The chain where the interchain account is registered. The host chain listens for IBC packets from a controller chain, which should contain instructions (e.g. Cosmos SDK messages) that the interchain account will execute.
`Controller Chain`: The chain registering and controlling an account on a host chain. The controller chain sends IBC packets to the host chain to control the account.
`Interchain Account`: An account on a host chain created using the ICS-27 protocol. An interchain account has all the capabilities of a normal account. However, rather than signing transactions with a private key, a controller chain will send IBC packets to the host chain which signals what transactions the interchain account should execute.
`Authentication Module`: A custom application module on the controller chain that uses the Interchain Accounts module to build custom logic for the creation & management of interchain accounts. It can be either an IBC application module using the [legacy API](10-legacy/03-keeper-api.md), or a regular Cosmos SDK application module sending messages to the controller submodule's `MsgServer` (this is the recommended approach from ibc-go v6 if access to packet callbacks is not needed). Please note that the legacy API will eventually be removed and IBC applications will not be able to use them in later releases.
## SDK security model
SDK modules on a chain are assumed to be trustworthy. For example, there are no checks to prevent an untrustworthy module from accessing the bank keeper.
The implementation of ICS-27 in ibc-go uses this assumption in its security considerations.
The implementation assumes other IBC application modules will not bind to ports within the ICS-27 namespace.
## Channel Closure
The provided interchain account host and controller implementations do not support `ChanCloseInit`. However, they do support `ChanCloseConfirm`.
This means that the host and controller modules cannot close channels, but they will confirm channel closures initiated by other implementations of ICS-27.
In the event of a channel closing (due to a packet timeout in an ordered channel, for example), the interchain account associated with that channel can become accessible again if a new channel is created with a (JSON-formatted) version string that encodes the exact same `Metadata` information of the previous channel. The channel can be reopened using either [`MsgRegisterInterchainAccount`](./05-messages.md#msgregisterinterchainaccount) or `MsgChannelOpenInit`. If `MsgRegisterInterchainAccount` is used, then it is possible to leave the `version` field of the message empty, since it will be filled in by the controller submodule. If `MsgChannelOpenInit` is used, then the `version` field must be provided with the correct JSON-encoded `Metadata` string. See section [Understanding Active Channels](./09-active-channels.md#understanding-active-channels) for more information.
When reopening a channel with the default controller submodule, the ordering of the channel cannot be changed. In order to change the ordering of the channel, the channel has to go through a [channel upgrade handshake](../../01-ibc/06-channel-upgrades.md) or reopen the channel with a custom controller implementation.
---
title: Overview
---
# Overview
:::note Synopsis
Learn about what the Fee Middleware module is, and how to build custom modules that utilize the Fee Middleware functionality
:::
## What is the Fee Middleware module?
IBC does not depend on relayer operators for transaction verification. However, the relayer infrastructure ensures liveness of the Interchain network — operators listen for packets sent through channels opened between chains, and perform the vital service of ferrying these packets (and proof of the transaction on the sending chain/receipt on the receiving chain) to the clients on each side of the channel.
Though relaying is permissionless, completely decentralized, and accessible, it does come with operational costs. Running full nodes to query transaction proofs and paying for transaction fees associated with IBC packets are two of the primary cost burdens which have driven the overall discussion on **a general, in-protocol incentivization mechanism for relayers**.
Initially, a [simple proposal](https://github.com/cosmos/ibc/pull/577/files) was created to incentivize relaying on ICS20 token transfers on the destination chain. However, the proposal was specific to ICS20 token transfers and would have to be reimplemented in this format on every other IBC application module.
After much discussion, the proposal was expanded to a [general incentivisation design](https://github.com/cosmos/ibc/tree/master/spec/app/ics-029-fee-payment) that can be adopted by any ICS application protocol as [middleware](../../01-ibc/04-middleware/02-develop.md).
## Concepts
ICS29 fee payments in this middleware design are built on the assumption that sender chains are the source of incentives — the chain on which packets are incentivized is the chain that distributes fees to relayer operators. However, as part of the IBC packet flow, messages have to be submitted on both sender and destination chains. This introduces the requirement of a mapping of relayer operator's addresses on both chains.
To achieve the stated requirements, the **fee middleware module has two main groups of functionality**:
- Registering of relayer addresses associated with each party involved in relaying the packet on the source chain. This registration process can be automated on startup of the relayer infrastructure and happens only once, not per packet flow.
This is described in the [Fee distribution section](04-fee-distribution.md).
- Escrowing fees by any party which will be paid out to each rightful party on completion of the packet lifecycle.
This is described in the [Fee messages section](03-msgs.md).
We complete the introduction by giving a list of definitions of relevant terminology.
`Forward relayer`: The relayer that submits the `MsgRecvPacket` message for a given packet (on the destination chain).
`Reverse relayer`: The relayer that submits the `MsgAcknowledgement` message for a given packet (on the source chain).
`Timeout relayer`: The relayer that submits the `MsgTimeout` or `MsgTimeoutOnClose` messages for a given packet (on the source chain).
`Payee`: The account address on the source chain to be paid on completion of the packet lifecycle. The packet lifecycle on the source chain completes with the receipt of a `MsgTimeout`/`MsgTimeoutOnClose` or a `MsgAcknowledgement`.
`Counterparty payee`: The account address to be paid on completion of the packet lifecycle on the destination chain. The packet lifecycle on the destination chain completes with a successful `MsgRecvPacket`.
`Refund address`: The address of the account paying for the incentivization of packet relaying. The account is refunded timeout fees upon successful acknowledgement. In the event of a packet timeout, both acknowledgement and receive fees are refunded.
## Known Limitations
- At the time of the release of the feature (ibc-go v4), the fee payments middleware only supported incentivisation of new channels; however, with the release of channel upgradeability (ibc-go v8.1) it is possible to enable incentivisation of all existing channels.
- Even though unlikely, there exists a DoS attack vector on a fee-enabled channel if 1) there exists a relayer software implementation that is incentivised to timeout packets if the timeout fee is greater than the sum of the fees to receive and acknowledge the packet, and 2) only this type of implementation is used by operators relaying on the channel. In this situation, an attacker could continuously incentivise the relayers to never deliver the packets by incrementing the timeout fee of the packets above the sum of the receive and acknowledge fees. However, this situation is unlikely to occur because 1) another relayer behaving honestly could relay the packets before they timeout, and 2) the attack would be costly because the attacker would need to incentivise the timeout fee of the packets with their own funds. Given the low impact and unlikelihood of the attack we have decided to accept this risk and not implement any mitigation measures.
## Module Integration
The Fee Middleware module, as the name suggests, plays the role of an IBC middleware and as such must be configured by chain developers to route and handle IBC messages correctly.
For Cosmos SDK chains this setup is done via the `app/app.go` file, where modules are constructed and configured in order to bootstrap the blockchain application.
## Example integration of the Fee Middleware module
```go
// app.go
// Register the AppModule for the fee middleware module
ModuleBasics = module.NewBasicManager(
...
ibcfee.AppModuleBasic{},
...
)
...
// Add module account permissions for the fee middleware module
maccPerms = map[string][]string{
...
ibcfeetypes.ModuleName: nil,
}
...
// Add fee middleware Keeper
type App struct {
...
IBCFeeKeeper ibcfeekeeper.Keeper
...
}
...
// Create store keys
keys := sdk.NewKVStoreKeys(
...
ibcfeetypes.StoreKey,
...
)
...
app.IBCFeeKeeper = ibcfeekeeper.NewKeeper(
appCodec, keys[ibcfeetypes.StoreKey],
app.IBCKeeper.ChannelKeeper, // may be replaced with IBC middleware
app.IBCKeeper.ChannelKeeper,
&app.IBCKeeper.PortKeeper, app.AccountKeeper, app.BankKeeper,
)
// See the section below for configuring an application stack with the fee middleware module
...
// Register fee middleware AppModule
app.moduleManager = module.NewManager(
...
ibcfee.NewAppModule(app.IBCFeeKeeper),
)
...
// Add fee middleware to begin blocker logic
app.moduleManager.SetOrderBeginBlockers(
...
ibcfeetypes.ModuleName,
...
)
// Add fee middleware to end blocker logic
app.moduleManager.SetOrderEndBlockers(
...
ibcfeetypes.ModuleName,
...
)
// Add fee middleware to init genesis logic
app.moduleManager.SetOrderInitGenesis(
...
ibcfeetypes.ModuleName,
...
)
```
## Configuring an application stack with Fee Middleware
As mentioned in [IBC middleware development](../../01-ibc/04-middleware/02-develop.md) an application stack may be composed of many or no middlewares that nest a base application.
These layers form the complete set of application logic that enable developers to build composable and flexible IBC application stacks.
For example, an application stack may be just a single base application like `transfer`, however, the same application stack composed with `29-fee` will nest the `transfer` base application
by wrapping it with the Fee Middleware module.
### Transfer
See below for an example of how to create an application stack using `transfer` and `29-fee`.
The following `transferStack` is configured in `app/app.go` and added to the IBC `Router`.
The in-line comments describe the execution flow of packets between the application stack and IBC core.
```go
// Create Transfer Stack
// SendPacket, since it is originating from the application to core IBC:
// transferKeeper.SendPacket -> fee.SendPacket -> channel.SendPacket
// RecvPacket, message that originates from core IBC and goes down to app, the flow is the other way
// channel.RecvPacket -> fee.OnRecvPacket -> transfer.OnRecvPacket
// transfer stack contains (from top to bottom):
// - IBC Fee Middleware
// - Transfer
// create IBC module from bottom to top of stack
var transferStack porttypes.IBCModule
transferStack = transfer.NewIBCModule(app.TransferKeeper)
transferStack = ibcfee.NewIBCMiddleware(transferStack, app.IBCFeeKeeper)
// Add transfer stack to IBC Router
ibcRouter.AddRoute(ibctransfertypes.ModuleName, transferStack)
```
### Interchain Accounts
See below for an example of how to create an application stack using `27-interchain-accounts` and `29-fee`.
The following `icaControllerStack` and `icaHostStack` are configured in `app/app.go` and added to the IBC `Router` with the associated authentication module.
The in-line comments describe the execution flow of packets between the application stack and IBC core.
```go
// Create Interchain Accounts Stack
// SendPacket, since it is originating from the application to core IBC:
// icaAuthModuleKeeper.SendTx -> icaController.SendPacket -> fee.SendPacket -> channel.SendPacket
// initialize ICA module with mock module as the authentication module on the controller side
var icaControllerStack porttypes.IBCModule
icaControllerStack = ibcmock.NewIBCModule(&mockModule, ibcmock.NewMockIBCApp("", scopedICAMockKeeper))
app.ICAAuthModule = icaControllerStack.(ibcmock.IBCModule)
icaControllerStack = icacontroller.NewIBCMiddleware(icaControllerStack, app.ICAControllerKeeper)
icaControllerStack = ibcfee.NewIBCMiddleware(icaControllerStack, app.IBCFeeKeeper)
// RecvPacket, message that originates from core IBC and goes down to app, the flow is:
// channel.RecvPacket -> fee.OnRecvPacket -> icaHost.OnRecvPacket
var icaHostStack porttypes.IBCModule
icaHostStack = icahost.NewIBCModule(app.ICAHostKeeper)
icaHostStack = ibcfee.NewIBCMiddleware(icaHostStack, app.IBCFeeKeeper)
// Add authentication module, controller and host to IBC router
ibcRouter.
// the ICA Controller middleware needs to be explicitly added to the IBC Router because the
// ICA controller module owns the port capability for ICA. The ICA authentication module
// owns the channel capability.
AddRoute(ibcmock.ModuleName+icacontrollertypes.SubModuleName, icaControllerStack). // ica with mock auth module stack route to ica (top level of middleware stack)
AddRoute(icacontrollertypes.SubModuleName, icaControllerStack).
AddRoute(icahosttypes.SubModuleName, icaHostStack)
```
## Fee Distribution
Packet fees are divided into 3 distinct amounts in order to compensate relayer operators for packet relaying on fee enabled IBC channels.
- `RecvFee`: The sum of all packet receive fees distributed to a payee for successful execution of `MsgRecvPacket`.
- `AckFee`: The sum of all packet acknowledgement fees distributed to a payee for successful execution of `MsgAcknowledgement`.
- `TimeoutFee`: The sum of all packet timeout fees distributed to a payee for successful execution of `MsgTimeout`.
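In ibc-go these three amounts travel together in a single fee type; roughly (a sketch based on the ICS29 design, where each field is an `sdk.Coins` amount):
```go
import sdk "github.com/cosmos/cosmos-sdk/types"

// Fee sketches the ICS29 packet fee: the three amounts escrowed per packet.
type Fee struct {
	RecvFee    sdk.Coins // distributed for successful MsgRecvPacket (forward relayer)
	AckFee     sdk.Coins // distributed for successful MsgAcknowledgement (reverse relayer)
	TimeoutFee sdk.Coins // distributed for successful MsgTimeout (timeout relayer)
}
```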
## Register a counterparty payee address for forward relaying
As mentioned in [ICS29 Concepts](01-overview.md#concepts), the forward relayer describes the actor who performs the submission of `MsgRecvPacket` on the destination chain.
Fee distribution for incentivized packet relays takes place on the packet source chain.
> Relayer operators are expected to register a counterparty payee address, in order to be compensated accordingly with `RecvFee`s upon completion of a packet lifecycle.
The counterparty payee address registered on the destination chain is encoded into the packet acknowledgement and communicated as such to the source chain for fee distribution.
**If a counterparty payee is not registered for the forward relayer on the destination chain, the escrowed fees will be refunded upon fee distribution.**
### Relayer operator actions
A transaction must be submitted **to the destination chain** including a `CounterpartyPayee` address of an account on the source chain.
The transaction must be signed by the `Relayer`.
Note: If a module account address is used as the `CounterpartyPayee` but the module has been set as a blocked address in the `BankKeeper`, the refunding to the module account will fail. This is because many modules use invariants to compare internal tracking of module account balances against the actual balance of the account stored in the `BankKeeper`. If a token transfer to the module account occurs without going through this module and updating the account balance of the module on the `BankKeeper`, then invariants may break and unknown behaviour could occur depending on the module implementation. Therefore, if it is desirable to use a module account that is currently blocked, the module developers should be consulted to gauge the possibility of removing the module account from the blocked list.
```go
type MsgRegisterCounterpartyPayee struct {
// unique port identifier
PortId string
// unique channel identifier
ChannelId string
// the relayer address
Relayer string
// the counterparty payee address
CounterpartyPayee string
}
```
> This message is expected to fail if:
>
> - `PortId` is invalid (see [24-host naming requirements](https://github.com/cosmos/ibc/blob/master/spec/core/ics-024-host-requirements/README.md#paths-identifiers-separators)).
> - `ChannelId` is invalid (see [24-host naming requirements](https://github.com/cosmos/ibc/blob/master/spec/core/ics-024-host-requirements/README.md#paths-identifiers-separators)).
> - `Relayer` is an invalid address (see [Cosmos SDK Addresses](https://github.com/cosmos/cosmos-sdk/blob/main/docs/learn/beginner/03-accounts.md#addresses)).
> - `CounterpartyPayee` is empty or contains more than 2048 bytes.
See below for an example CLI command:
```bash
simd tx ibc-fee register-counterparty-payee transfer channel-0 \
cosmos1rsp837a4kvtgp2m4uqzdge0zzu6efqgucm0qdh \
osmo1v5y0tz01llxzf4c2afml8s3awue0ymju22wxx2 \
--from cosmos1rsp837a4kvtgp2m4uqzdge0zzu6efqgucm0qdh
```
## Register an alternative payee address for reverse and timeout relaying
As mentioned in [ICS29 Concepts](01-overview.md#concepts), the reverse relayer describes the actor who performs the submission of `MsgAcknowledgement` on the source chain.
Similarly the timeout relayer describes the actor who performs the submission of `MsgTimeout` (or `MsgTimeoutOnClose`) on the source chain.
> Relayer operators **may choose** to register an optional payee address, in order to be compensated accordingly with `AckFee`s and `TimeoutFee`s upon completion of a packet life cycle.
If a payee is not registered for the reverse or timeout relayer on the source chain, then fee distribution assumes the default behaviour, where fees are paid out to the relayer account which delivers `MsgAcknowledgement` or `MsgTimeout`/`MsgTimeoutOnClose`.
### Relayer operator actions
A transaction must be submitted **to the source chain** including a `Payee` address of an account on the source chain.
The transaction must be signed by the `Relayer`.
Note: If a module account address is used as the `Payee` it is recommended to [turn off invariant checks](https://github.com/cosmos/ibc-go/blob/v7.0.0/testing/simapp/app.go#L727) for that module.
```go
type MsgRegisterPayee struct {
// unique port identifier
PortId string
// unique channel identifier
ChannelId string
// the relayer address
Relayer string
// the payee address
Payee string
}
```
> This message is expected to fail if:
>
> - `PortId` is invalid (see [24-host naming requirements](https://github.com/cosmos/ibc/blob/master/spec/core/ics-024-host-requirements/README.md#paths-identifiers-separators)).
> - `ChannelId` is invalid (see [24-host naming requirements](https://github.com/cosmos/ibc/blob/master/spec/core/ics-024-host-requirements/README.md#paths-identifiers-separators)).
> - `Relayer` is an invalid address (see [Cosmos SDK Addresses](https://github.com/cosmos/cosmos-sdk/blob/main/docs/learn/beginner/03-accounts.md#addresses)).
> - `Payee` is an invalid address (see [Cosmos SDK Addresses](https://github.com/cosmos/cosmos-sdk/blob/main/docs/learn/beginner/03-accounts.md#addresses)).
See below for an example CLI command:
```bash
simd tx ibc-fee register-payee transfer channel-0 \
cosmos1rsp837a4kvtgp2m4uqzdge0zzu6efqgucm0qdh \
cosmos153lf4zntqt33a4v0sm5cytrxyqn78q7kz8j8x5 \
--from cosmos1rsp837a4kvtgp2m4uqzdge0zzu6efqgucm0qdh
```
---
title: Overview
---
# Overview
:::note Synopsis
Learn about what the token Transfer module is
:::
## What is the Transfer module?
Transfer is the Cosmos SDK implementation of the [ICS-20](https://github.com/cosmos/ibc/tree/master/spec/app/ics-020-fungible-token-transfer) protocol, which enables cross-chain fungible token transfers.
## Concepts
### Acknowledgements
ICS20 uses the recommended acknowledgement format as specified by [ICS 04](https://github.com/cosmos/ibc/tree/master/spec/core/ics-004-channel-and-packet-semantics#acknowledgement-envelope).
A successful receive of a transfer packet will result in a Result Acknowledgement being written
with the value `[]byte{byte(1)}` in the `Response` field.
An unsuccessful receive of a transfer packet will result in an Error Acknowledgement being written
with the error message in the `Response` field.
### Denomination trace
The denomination trace corresponds to the information that allows a token to be traced back to its
origin chain. It contains a sequence of port and channel identifiers ordered from the most recent to
the oldest in the timeline of transfers.
This information is included on the token's base denomination field in the form of a hash to prevent an
unbounded denomination length. For example, the token `transfer/channelToA/uatom` will be displayed
as `ibc/7F1D3FCF4AE79E1554D670D1AD949A9BA4E4A3C76C63093E17E446A46061A7A2`. The human readable denomination
is stored using `x/bank` module's [denom metadata](https://docs.cosmos.network/main/build/modules/bank#denom-metadata)
feature. You may display the human readable denominations by querying balances with the `--resolve-denom` flag, as in:
```shell
simd query bank balances [address] --resolve-denom
```
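The `ibc/...` denomination shown above is the uppercase hex SHA-256 of the full trace path, per ADR 001; a small self-contained sketch:
```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"strings"
)

func main() {
	trace := "transfer/channelToA/uatom" // full trace path including the base denom
	hash := sha256.Sum256([]byte(trace))
	fmt.Println("ibc/" + strings.ToUpper(hex.EncodeToString(hash[:])))
	// expected: ibc/7F1D3FCF4AE79E1554D670D1AD949A9BA4E4A3C76C63093E17E446A46061A7A2
}
```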
Each send to any chain other than the one it was previously received from is a movement forwards in
the token's timeline. This causes a trace to be added to the token's history and the destination port
and destination channel to be prefixed to the denomination. In these instances the sender chain is
acting as the "source zone". When the token is sent back to the chain it previously received from, the
prefix is removed. This is a backwards movement in the token's timeline and the sender chain is
acting as the "sink zone".
It is strongly recommended to read the full details of [ADR 001: Coin Source Tracing](/architecture/adr-001-coin-source-tracing) to understand the implications and context of the IBC token representations.
## UX suggestions for clients
For clients (wallets, exchanges, applications, block explorers, etc) that want to display the source of the token, it is recommended to use the following alternatives for each of the cases below:
### Direct connection
If the denomination trace contains a single identifier prefix pair (as in the example above), then
the easiest way to retrieve the chain and light client identifier is to map the trace information
directly. In summary, this requires querying the channel from the denomination trace identifiers,
and then the counterparty client state using the counterparty port and channel identifiers from the
retrieved channel.
A general pseudo algorithm would look like the following:
1. Query the full denomination trace.
2. Query the channel with the `portID/channelID` pair, which corresponds to the first destination of the
token.
3. Query the client state using the identifiers pair. Note that this query will return a `"Not Found"` response if the current chain is not connected to this channel.
4. Retrieve the client identifier or chain identifier from the client state (eg: on
Tendermint clients) and store it locally.
Using the gRPC gateway client service the steps above would be, with a given IBC token `ibc/7F1D3FCF4AE79E1554D670D1AD949A9BA4E4A3C76C63093E17E446A46061A7A2` stored on `chainB`:
1. `GET /ibc/apps/transfer/v1/denom_traces/7F1D3FCF4AE79E1554D670D1AD949A9BA4E4A3C76C63093E17E446A46061A7A2` -> `{"path": "transfer/channelToA", "base_denom": "uatom"}`
2. `GET /ibc/apps/transfer/v1/channels/channelToA/ports/transfer/client_state` -> `{"client_id": "clientA", "chain-id": "chainA", ...}`
3. `GET /ibc/apps/transfer/v1/channels/channelToA/ports/transfer` -> `{"channel_id": "channelToA", "port_id": "transfer", "counterparty": {"channel_id": "channelToB", "port_id": "transfer"}, ...}`
4. `GET /ibc/apps/transfer/v1/channels/channelToB/ports/transfer/client_state` -> `{"client_id": "clientB", "chain-id": "chainB", ...}`
Then, the token transfer chain path for the `uatom` denomination would be: `chainA` -> `chainB`.
### Multiple hops
The multiple channel hops case applies when the token has passed through multiple chains between the original source and final destination chains.
The IBC protocol doesn't know the topology of the overall network (i.e connections between chains and identifier names between them). For this reason, in the multiple hops case, a particular chain in the timeline of the individual transfers can't query the chain and client identifiers of the other chains.
Take for example the following sequence of transfers `A -> B -> C` for an IBC token, with a final prefix path (trace info) of `transfer/channelChainC/transfer/channelChainB`. What the paragraph above means is that even in the case that chain `C` is directly connected to chain `A`, querying the port and channel identifiers that chain `B` uses to connect to chain `A` (eg: `transfer/channelChainA`) can be completely different from the one that chain `C` uses to connect to chain `A` (eg: `transfer/channelToChainA`).
Thus, the solutions that the IBC team recommends for clients are the following:
- **Connect to all chains**: Connecting to all the chains in the timeline would allow clients to
perform the queries outlined in the [direct connection](#direct-connection) section to each
relevant chain. By repeatedly following the port and channel denomination trace transfer timeline,
clients should always be able to find all the relevant identifiers. This comes at the tradeoff
that the client must connect to nodes on each of the chains in order to perform the queries.
- **Relayer as a Service (RaaS)**: A longer term solution is to use/create a relayer service that
could map the denomination trace to the chain path timeline for each token (i.e `origin chain ->
chain #1 -> ... -> chain #(n-1) -> final chain`). These services could provide merkle proofs in
order to allow clients to optionally verify the path timeline correctness for themselves by
running light clients. If the proofs are not verified, they should be considered as trusted third-party
services. Additionally, clients would be advised in the future to use RaaS providers that support the
largest number of connections between chains in the ecosystem. Unfortunately, none of the existing
public relayers (in [Golang](https://github.com/cosmos/relayer) and
[Rust](https://github.com/informalsystems/ibc-rs)) provide this service to clients.
:::tip
The only viable alternative for clients (at the time of writing) to handle tokens with multiple connection hops is to connect to all chains directly and perform the relevant queries on each of them in sequence.
:::
## Forwarding
:::info
Token forwarding and unwinding is supported only on ICS20 v2 transfer channels.
:::
Forwarding allows tokens to be routed to a final destination through multiple (up to 8) intermediary
chains. With forwarding, it's also possible to unwind IBC vouchers to their native chain, and forward
them afterwards to another destination, all with just a single transfer transaction on the sending chain.
### Forward tokens
Native tokens or IBC vouchers on any chain can be forwarded through intermediary chains to reach their
final destination. For example, given the topology below, with 3 chains and a transfer channel between
chains A and B and between chains B and C:
![Light Mode Forwarding](./images/forwarding-3-chains-light.png#gh-light-mode-only)![Dark Mode Forwarding](./images/forwarding-3-chains-dark.png#gh-dark-mode-only)
Native tokens on chain `A` can be sent to chain `C` through chain `B`. The routing is specified by the
source port ID and channel ID of choice on every intermediary chain. In this example, there is only one
forwarding hop on chain `B` and the port ID, channel ID pair is `transfer`, `channelBToC`. Forwarding of
multi-denom collections of tokens is also allowed (i.e. forwarding of tokens of different denominations).
### Unwind tokens
Taking again as an example the topology from the previous section, we assume that native tokens on chain `A`
have been transferred to chain `C`. The IBC vouchers on chain `C` have the denomination trace
`transfer/channelCtoB/transfer/channelBtoA`, and with forwarding it is possible to submit a transfer message
on chain `C` and automatically unwind the vouchers through chain `B` to chain `A`, so that the tokens recovered
on the origin chain regain their native denomination. In order to execute automatic unwinding, the transfer
module does not require extra user input: the unwind route is encoded in the denomination trace with the
pairs of destination port ID, channel ID that are added on every chain where the tokens are received.
Please note that unwinding of vouchers is only allowed when the transferred vouchers all share the same denomination
trace (signifying coins that all originate from the same source). It is not possible to unwind vouchers of two different
IBC denominations, since they come from different source chains.
### Unwind tokens and then forward
Unwinding and forwarding can be used in combination, so that vouchers are first unwound to their origin chain
and then forwarded to a final destination. The same restriction as in the unwinding case applies: only vouchers
of a single IBC denomination can be used.
## Locked funds
In some [exceptional cases](/architecture/adr-026-ibc-client-recovery-mechanisms#exceptional-cases), a client state associated with a given channel cannot be updated. This means that funds from fungible tokens in that channel will be permanently locked and can no longer be transferred.
To mitigate this, a client update governance proposal can be submitted to update the frozen client
with a new valid header. Once the proposal passes the client state will be unfrozen and the funds
from the associated channels will then be unlocked. This mechanism only applies to clients that
allow updates via governance, such as Tendermint clients.
In addition to this, it's important to mention that a token must be sent back along the exact route
that it took originally in order to return it to its original form on the source chain (eg: the
Cosmos Hub for the `uatom`). Sending a token back to the same chain across a different channel will
**not** move the token back across its timeline. If a channel in the chain history closes before the
token can be sent back across that channel, then the token will not be returnable to its original
form.
## Security considerations
For safety, no other module must be capable of minting tokens with the `ibc/` prefix. The IBC
transfer module needs a subset of the denomination space that only it can create tokens in.
## Channel Closure
The IBC transfer module does not support channel closure.
---
# Introduction
Sonr is a decentralized identity network built on the Cosmos SDK. It has early origins as a peer-to-peer file-sharing network, but has since evolved into a platform for decentralized authentication and authorization. The early lessons learned from our file-sharing roots serve as our philosophy for building the Sonr Blockchain.
1. [Cosmos-SDK](./concepts/Cosmos-SDK.md)
2. [Chain-Modules](./concepts/Chain-Modules.md)
3. [System-Architecture](./concepts/System-Architecture.md)
4. [Token-Economy](./concepts/Token-Economy.md)
5. [Service-Management](./concepts/Service-Management.md)
6. [Design-System](./concepts/Design-System.md)
7. [Self-Custody](./concepts/Self-Custody.md)
8. [Consumer Launch](./concepts/Consumer-Launch.md)
## Principles
1. Bitcoin is digital gold
2. Blockchains are programmable databases with functional operations
3. Staking is essentially a savings account
4. The Sonr Network conducts all operations in the $SNR token
5. Service Delegation subsidizes user wallet operations.
6. Cryptocurrency has the potential to break the software innovation ceiling
## The Problem
Centralized identity has led to internet monopolies abusing your trust and privacy.
## The Solution
A peer-to-peer system for decentralized personal identity with Authentication and Authorization capabilities.
## What is Sonr?
A privacy-preserving identity system managed by user-controlled decentralized vaults, which have the flexibility of
software wallets with the security of hardware wallets.
## The End Goal
A data-sharing economy where human-specific information has intrinsic value. Services are incentivized to act in
good faith in order to obtain quality user data.
## How do we do it?
Provide Internet Citizens with a robust, easy-to-use WebVault which features a crypto wallet, passkey authenticator, and encrypted messages. The WebVault serves as a wrapper over every sensitive intent-based user interaction. The Sonr blockchain is responsible for keeping a record of where WebVaults are located, when authorization activity occurs, and which services are granted which permissions.
## The User Incentive
Data is the byproduct of currency exchange in the Information Age: services pay other services for user data in order to enrich their databases with complete user personas.
---
# `x/did`
The Decentralized Identity module is responsible for managing native Sonr Accounts, their derived wallets, and associated user identification information.
## State
The DID module maintains several key state structures:
### Controller State
The Controller state represents a Sonr DWN Vault. It includes:
- Unique identifier (number)
- DID
- Sonr address
- Ethereum address
- Bitcoin address
- Public key
- Keyshares pointer
- Claimed block
- Creation block
### Assertion State
The Assertion state includes:
- DID
- Controller
- Subject
- Public key
- Assertion type
- Accumulator (metadata)
- Creation block
### Authentication State
The Authentication state includes:
- DID
- Controller
- Subject
- Public key
- Credential ID
- Metadata
- Creation block
### Verification State
The Verification state includes:
- DID
- Controller
- DID method
- Issuer
- Subject
- Public key
- Verification type
- Metadata
- Creation block
## State Transitions
State transitions are triggered by the following messages:
- LinkAssertion
- LinkAuthentication
- UnlinkAssertion
- UnlinkAuthentication
- ExecuteTx
- UpdateParams
## Messages
The DID module defines the following messages:
1. MsgLinkAuthentication
2. MsgLinkAssertion
3. MsgExecuteTx
4. MsgUnlinkAssertion
5. MsgUnlinkAuthentication
6. MsgUpdateParams
Each message triggers specific state machine behaviors related to managing DIDs, authentications, assertions, and module parameters.
## Query
The DID module provides the following query endpoints:
1. Params: Query all parameters of the module
2. Resolve: Query the DID document by its ID
3. Sign: Sign a message with the DID document
4. Verify: Verify a message with the DID document
## Params
The module parameters include:
- Allowed public keys (map of KeyInfo)
- Conveyance preference
- Attestation formats
## Client
The module provides gRPC and REST endpoints for all defined messages and queries.
## Future Improvements
Potential future improvements could include:
1. Enhanced privacy features for DID operations
2. Integration with more blockchain networks
3. Support for additional key types and cryptographic algorithms
4. Improved revocation mechanisms for credentials and assertions
## Tests
Acceptance tests should cover all major functionality, including:
- Creating and managing DIDs
- Linking and unlinking assertions and authentications
- Executing transactions with DIDs
- Querying and resolving DIDs
- Parameter updates
## Appendix
### Account
An Account represents a user's identity within the Sonr ecosystem. It includes information such as the user's public key, associated wallets, and other identification details.
### Decentralized Identifier (DID)
A Decentralized Identifier (DID) is a unique identifier that is created, owned, and controlled by the user. It is used to establish a secure and verifiable digital identity.
### Verifiable Credential (VC)
A Verifiable Credential (VC) is a digital statement that can be cryptographically verified. It contains claims about a subject (e.g., a user) and is issued by a trusted authority.
### Key Types
The module supports various key types, including:
- Role
- Algorithm (e.g., ES256, EdDSA, ES256K)
- Encoding (e.g., hex, base64, multibase)
- Curve (e.g., P256, P384, P521, X25519, X448, Ed25519, Ed448, secp256k1)
### JSON Web Key (JWK)
The module supports JSON Web Keys (JWK) for representing cryptographic keys, including properties such as key type (kty), curve (crv), and coordinates (x, y) for EC and OKP keys, as well as modulus (n) and exponent (e) for RSA keys.
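For illustration, the JWK properties named above correspond to the standard RFC 7517 JSON field names; a minimal Go sketch (not the module's actual type) might look like:

```go
package didtypes // hypothetical package, for illustration only

// JWK is a minimal sketch of a JSON Web Key as described above.
type JWK struct {
	Kty string `json:"kty"`           // key type: "EC", "OKP", or "RSA"
	Crv string `json:"crv,omitempty"` // curve name for EC and OKP keys
	X   string `json:"x,omitempty"`   // x coordinate (EC) or public key (OKP), base64url
	Y   string `json:"y,omitempty"`   // y coordinate (EC keys only)
	N   string `json:"n,omitempty"`   // modulus (RSA keys)
	E   string `json:"e,omitempty"`   // exponent (RSA keys)
}
```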

# `x/dwn`
The DWN module is responsible for the management of IPFS deployed Decentralized Web Nodes (DWNs) and their associated data.
## Concepts
The DWN module introduces several key concepts:
1. Decentralized Web Node (DWN): A distributed network for storing and sharing data.
2. Schema: A structure defining the format of various data types in the DWN.
3. IPFS Integration: The module can interact with IPFS for decentralized data storage.
## State
The DWN module maintains the following state:
### DWN State
The DWN state is stored using the following structure:
```protobuf
message DWN {
uint64 id = 1;
string alias = 2;
string cid = 3;
string resolver = 4;
}
```
This state is indexed by ID, alias, and CID for efficient querying.
### Params State
The module parameters are stored in the following structure:
```protobuf
message Params {
bool ipfs_active = 1;
bool local_registration_enabled = 2;
Schema schema = 4;
}
```
### Schema State
The Schema state defines the structure for various data types:
```protobuf
message Schema {
int32 version = 1;
string account = 2;
string asset = 3;
string chain = 4;
string credential = 5;
string did = 6;
string jwk = 7;
string grant = 8;
string keyshare = 9;
string profile = 10;
}
```
## State Transitions
State transitions in the DWN module are primarily triggered by:
1. Updating module parameters
2. Allocating new DWNs
3. Syncing DID documents
## Messages
The DWN module defines the following message:
1. `MsgUpdateParams`: Used to update the module parameters.
```protobuf
message MsgUpdateParams {
string authority = 1;
Params params = 2;
}
```
## Begin Block
No specific begin-block operations are defined for this module.
## End Block
No specific end-block operations are defined for this module.
## Hooks
The DWN module does not define any hooks.
## Events
The DWN module does not explicitly define any events. However, standard Cosmos SDK events may be emitted during state transitions.
## Client
The DWN module provides the following gRPC query endpoints:
1. `Params`: Queries all parameters of the module.
2. `Schema`: Queries the DID document schema.
3. `Allocate`: Initializes a Target DWN available for claims.
4. `Sync`: Queries the DID document by its ID and returns required information.
## Params
The module parameters include:
- `ipfs_active` (bool): Indicates if IPFS integration is active.
- `local_registration_enabled` (bool): Indicates if local registration is enabled.
- `schema` (Schema): Defines the structure for various data types in the DWN.
## Future Improvements
Potential future improvements could include:
1. Enhanced IPFS integration features.
2. Additional authentication mechanisms beyond WebAuthn.
3. Improved DID document management and querying capabilities.
## Tests
Acceptance tests should cover:
1. Parameter updates
2. DWN state management
3. Schema queries
4. DWN allocation process
5. DID document syncing
## Appendix
| Concept | Description |
| ------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| Decentralized Web Node (DWN) | A decentralized, distributed, and secure network of nodes that store and share data. It is a decentralized alternative to traditional web hosting services. |
| Decentralized Identifier (DID) | A unique identifier that is created, owned, and controlled by the user. It is used to establish a secure and verifiable digital identity. |
| HTMX (Hypertext Markup Language eXtensions) | A set of extensions to HTML that allow for the creation of interactive web pages. It is used to enhance the user experience and provide additional functionality to web applications. |
| IPFS (InterPlanetary File System) | A decentralized, peer-to-peer network for storing and sharing data. It is a distributed file system that allows for the creation and sharing of content across a network of nodes. |
| WebAuthn (Web Authentication) | A set of APIs that allow websites to request user authentication using biometric or non-biometric factors. |
| WebAssembly (Web Assembly) | A binary instruction format for a stack-based virtual machine. |
| Verifiable Credential (VC) | A digital statement that can be cryptographically verified. |

# `x/svc`
The `x/svc` module is responsible for managing the registration and authorization of services within the Sonr ecosystem. It provides a secure and verifiable mechanism for registering and authorizing services using Decentralized Identifiers (DIDs).
## Concepts
- **Service**: A decentralized service on the Sonr blockchain with properties such as ID, authority, origin, name, description, category, tags, and expiry height.
- **Profile**: Represents a DID alias with properties like ID, subject, origin, and controller.
- **Metadata**: Contains information about a service, including name, description, category, icon, and tags.
### Dependencies
- [x/did](https://github.com/onsonr/sonr/tree/master/x/did)
- [x/group](https://github.com/onsonr/sonr/tree/master/x/group)
- [x/nft](https://github.com/onsonr/sonr/tree/master/x/nft)
## State
The module uses the following state structures:
### Metadata
Stores information about services:
- Primary key: `id` (auto-increment)
- Unique index: `origin`
- Fields: id, origin, name, description, category, icon (URI), tags
### Profile
Stores DID alias information:
- Primary key: `id`
- Unique index: `subject,origin`
- Fields: id, subject, origin, controller
## Messages
### MsgUpdateParams
Updates the module parameters. Can only be executed by the governance account.
### MsgRegisterService
Registers a new service on the blockchain. Requires a valid TXT record in DNS for the origin; a verification sketch follows.
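As a sketch of what that check could look like, the snippet below resolves the origin's TXT records with Go's standard resolver. The `sonr-proof=` record format is an assumption for illustration; the module defines the authoritative format:

```go
package main

import (
	"fmt"
	"net"
	"strings"
)

// verifyOriginTXT reports whether the origin's DNS TXT records contain the
// expected proof value ("sonr-proof=<value>" is a hypothetical format).
func verifyOriginTXT(origin, expected string) (bool, error) {
	records, err := net.LookupTXT(origin)
	if err != nil {
		return false, err
	}
	for _, txt := range records {
		if strings.TrimSpace(txt) == "sonr-proof="+expected {
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ok, err := verifyOriginTXT("example.com", "abc123")
	fmt.Println(ok, err)
}
```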
## Params
The module has the following parameters:
- `categories`: List of allowed service categories
- `types`: List of allowed service types
## Query
The module provides the following query:
### Params
Retrieves all parameters of the module.
## Client
### gRPC
The module provides a gRPC Query service with the following RPC:
- `Params`: Get all parameters of the module
### CLI
(TODO: Add CLI commands for interacting with the module)
## Events
(TODO: List and describe event tags used by the module)
## Future Improvements
- Implement service discovery mechanisms
- Add support for service reputation and rating systems
- Enhance service metadata with more detailed information
- Implement service update and deactivation functionality
## Tests
(TODO: Add acceptance tests for the module)
## Appendix
This module is part of the Sonr blockchain project and interacts with other modules such as the DID and NFT modules to provide a comprehensive decentralized service ecosystem.

# User Controlled Authorization Network (UCAN) Specification
# Abstract
User-Controlled Authorization Network (UCAN) is a [trustless], secure, [local-first], user-originated, distributed authorization scheme. This document provides a high level overview of the components of the system, concepts, and motivation. Exact formats are given in [sub-specifications].
# Introduction
User-Controlled Authorization Network (UCAN) is a [trustless], secure, [local-first], user-originated, distributed authorization scheme. It provides public-key verifiable, delegable, expressive, openly extensible [capabilities]. UCANs achieve public verifiability with late-bound certificate chains and principals represented by [decentralized identifiers (DIDs)][DID].
UCAN improves the familiarity and adoptability of schemes like [SPKI/SDSI][SPKI] for web and native application contexts. UCAN allows for the creation, delegation, and invocation of authority by any agent with a DID, including traditional systems and peer-to-peer architectures beyond traditional cloud computing.
## Motivation
> If we practice our principles, we could have both security and functionality. Treating security as a separate concern has not succeeded in bridging the gap between principle and practice, because it operates without knowledge of what constitutes least authority.
>
> — [Miller][Mark Miller] et al, [The Structure of Authority]
Since at least [Multics], access control lists ([ACL]s) have been the most popular form of digital authorization, where a list of what each user is allowed to do is maintained on the resource. ACLs (and later [RBAC]) have been a successful model suited to architectures where persistent access to a single list is viable. ACLs require that rules be sufficiently well specified, such as in a centralized database with rules covering all possible scenarios. This both imposes a very high maintenance burden on programmers as a system grows in complexity, and is a key vector for [confused deputies][confused deputy problem].
With increasing interconnectivity between machines becoming commonplace, authorization needs to scale to meet the load demands of distributed systems while providing partition tolerance. However, it is not always practical to maintain a single central authorization source. Even when copies of the authorization list are distributed to the relevant servers, latency and partitions introduce troublesome challenges with conflicting updates, to say nothing of storage requirements.
A large portion of personal information now also moves through connected systems. As a result, data privacy is a prominent theme when considering the design of modern applications, to the point of being legislated in parts of the world.
Ahead-of-time coordination is often a barrier to development in many projects. Flexibility to define specialized authorization semantics for resources and the ability to integrate with external systems trustlessly are essential as the number of autonomous, specialized, and coordinated applications increases.
Many high-value applications run in hostile environments. In recognition of this, many vendors now include public key functionality, such as [non-extractable keys in browsers][browser api crypto key], [certificate systems for external keys][fido], [platform keys][passkey], and [secure hardware enclaves] in widespread consumer devices.
Two related models that work exceptionally well in the above context are Simple Public Key Infrastructure ([SPKI][spki rfc]) and object capabilities ([OCAP]). Since offline operation and self-verifiability are two requirements, UCAN adopts a [certificate capability model] related to [SPKI].
## Intuition for Auth System Differences
The following analogies illustrate several significant trade-offs between these systems but are only accurate enough to build intuition. A good resource for a more thorough presentation of these trade-offs is [Capability Myths Demolished]. In this framework, UCAN approximates SPKI with some dynamic features.
### Access Control Lists
By analogy, ACLs are like a bouncer at an exclusive event. This bouncer has a list of attendees allowed in and which of those are VIPs that get extra access. People trying to get in show their government-issued ID and are accepted or rejected. In addition, they may get a lanyard to identify that they have previously been allowed in. If someone is disruptive, they can simply be crossed off the list and denied further entry.
If there are many such events at many venues, the organizers need to coordinate ahead of time, denials need to be synchronized, and attendees need to show their ID cards to many bouncers. The likelihood of the bouncer letting in the wrong person due to synchronization lag or confusion by someone sharing a name is nonzero.
### Certificate Capabilities
UCANs work more like [movie tickets][caps as keys] or a festival pass. No one needs to check your ID; who you are is irrelevant. For example, if you have a ticket issued by the theater to see Citizen Kane, you are admitted to Theater 3. If you cannot attend an event, you can hand this ticket to a friend who wants to see the film instead, and there is no coordination required with the theater ahead of time. However, if the theater needs to cancel tickets for some reason, it needs a way of uniquely identifying them and sharing that information between venues.
### Object Capabilities
Object capability ("ocap") systems use a combination of references, encapsulated state, and proxy forwarding. As the name implies, this is fairly close to object-oriented or actor-based systems. Object capabilities are [robust][Robust Composition], flexible, and expressive.
To achieve these properties, object capabilities have two requirements: [fail-safe] and locality preservation. The emphasis on consistency rules out partition tolerance[^pcec].
## Security Considerations
Each UCAN includes an assertion of what it is allowed to do. "Proofs" are positive evidence (elsewhere called "witnesses") of the possession of rights. They are cryptographically verifiable chains showing that the UCAN issuer either claims to directly own a resource, or that it was delegated to them by some claimed owner. In the most common case, the root owner's ID is the only globally unique identity for the resource.
Root capability issuers function as verifiable, distributed roots of trust. The delegation chain is by definition a provenance log. Private keys themselves SHOULD NOT move from one context to another. Keeping keys unique to each physical device and unique per use case is RECOMMENDED to reduce opportunity for keys to leak, and limit blast radius in the case of compromises. "Sharing authority without sharing keys" is provided by capabilities, so there is no reason to share keys directly.
Note that a structurally and cryptographically valid UCAN chain can be semantically invalid. The executor MUST verify the ownership of any external resources at execution time. While not possible for all use cases (e.g. replicated state machines and eventually consistent data), having the Executor be the resource itself is RECOMMENDED.
While certificate chains go a long way toward improving security, they do not provide [confinement] on their own. The principle of least authority SHOULD be used when delegating a UCAN: minimizing the amount of time that a UCAN is valid for and reducing authority to the bare minimum required for the delegate to complete their task. This delegate should be trusted as little as is practical since they can further sub-delegate their authority to others without alerting their delegator. UCANs do not offer confinement (as that would require all processes to be online), so it is impossible to guarantee knowledge of all of the sub-delegations that exist. The ability to revoke some or all downstream UCANs exists as a last resort.
## Inversion of Control
[Inversion of control] is achieved due to two properties: self-certifying delegation and reference passing. There is no Authorization Server (AS) that sits between requestors and resources. In traditional terms, the owner of a UCAN resource is the resource server (RS) directly.
This inverts the usual relationship between resources and users: the resource grants some (or all) authority over itself to agents, as opposed to an Authorization Server managing the relationship between them. This has several major advantages:
- Fully distributed and scalable
- Self-contained request without intermediary
- Partition tolerance, [support for replicated data and machines][overcoming SSI]
- Flexible granularity
- Compositionality: no distinction between resources residing together or apart
```
┌─────────────┐ ┌─────────────┐ ┌─────────────┐
│ │ │ │ │ │
│ │ │ ┌─────────┐ │ │ │
│ │ │ │ Bob's │ │ │ │
│ │ │ │ Photo │ │ │ │
│ │ │ │ Gallery │ │ │ │
│ │ │ └─────────┘ │ │ │
│ │ │ │ │ │
│ Alice's │ │ Bob's │ │ Carol's │
│ Stuff │ │ Stuff │ │ Stuff │
│ │ │ │ │ │
│ ┌───────┼───┼─────────────┼───┼──┐ │
│ │ │ │ │ │ │ │
│ │ │ │ ┌───┼───┼──┼────────┐ │
│ │ │ │ Alice's │ │ │ │ │ │
│ │ │ │ Music │ │ │ │Carol's │ │
│ │ │ │ Player │ │ │ │ Game │ │
│ │ │ │ │ │ │ │ │ │
│ │ │ │ └───┼───┼──┼────────┘ │
│ │ │ │ │ │ │ │
│ └───────┼───┼─────────────┼───┼──┘ │
│ │ │ │ │ │
└─────────────┘ └─────────────┘ └─────────────┘
```
This additionally allows UCAN to model auth for [eventually consistent and replicated state][overcoming SSI].
# Roles
There are several roles that an agent MAY assume:
| Name | Description |
| --------- | ------------------------------------------------------------------------------------------------ |
| Agent | The general class of entities and principals that interact with a UCAN |
| Audience | The Principal delegated to in the current UCAN. Listed in the `aud` field |
| Executor | The Agent that actually performs the action described in an invocation |
| Invoker | A Principal that requests an Executor perform some action that uses the Invoker's authority |
| Issuer | The Principal of the current UCAN. Listed in the `iss` field |
| Owner | A Subject that controls some external resource |
| Principal | An agent identified by DID (listed in a UCAN's `iss` or `aud` field) |
| Revoker | The Issuer listed in a proof chain that revokes a UCAN |
| Subject   | The Principal whose authority is delegated or invoked                                              |
| Validator | Any Agent that interprets a UCAN to determine that it is valid, and which capabilities it grants |
```mermaid
flowchart TD
subgraph Agent
subgraph Principal
direction TB
subgraph Issuer
direction TB
subgraph Subject
direction TB
Executor
Owner
end
Revoker
end
subgraph Audience
Invoker
end
end
Validator
end
```
## Subject
> At the very least every object should have a URL
>
> — [Alan Kay], [The computer revolution hasn't happened yet]
> Every Erlang process in the universe should be addressable and introspective
>
> — [Joe Armstrong], [Code Mesh 2016]
A [Subject] represents the Agent that a capability is for. A Subject MUST be referenced by [DID]. This behaves much like a [GUID], with the addition of public key verifiability. This unforgeability prevents malicious namespace collisions which can lead to [confused deputies][confused deputy problem].
### Resource
A resource is some data or process that can be uniquely identified by a [URI]. It can be anything: a row in a database, a user account, a storage quota, an email address, etc. Resources MAY be as coarse- or fine-grained as desired. Finer-grained is RECOMMENDED where possible, as it is easier to model the principle of least authority ([PoLA]).
A resource describes the noun of a capability. The resource pointer MUST be provided in [URI] format. Arbitrary and custom URIs MAY be used, provided that the intended recipient can decode the URI. The URI is merely a unique identifier to describe the pointer to — and within — a resource.
Having a unique agent represent a resource (and act as its manager) is RECOMMENDED. However, to help traditional ACL-based systems transition to certificate capabilities, an agent MAY manage multiple resources, and [act as the registrant in the ACL system][wrapping existing systems].
Unless explicitly stated, the Resource of a UCAN MUST be the Subject.
## Issuer & Audience
The Issuer (`iss`) and Audience (`aud`) can be conceptualized as the sender and receiver (respectively) of a postal letter. Every UCAN MUST be signed with the private key associated with the DID in the `iss` field.
For example:
```js
"aud": "did:key:z6MkiTBz1ymuepAQ4HEHYSF1H8quG5GLVVQR3djdX3mDooWp",
"iss": "did:key:zDnaerDaTF5BXEavCrfRZEk316dpbLsfPDZ3WJ5hRTPFU2169",
```
Please see the [Cryptosuite] section for more detail on DIDs.
# Lifecycle
The UCAN lifecycle has four components:
| Spec | Description | Requirement Level |
| ------------ | ------------------------------------------------------------------------ | ----------------- |
| [Delegation] | Pass, attenuate, and secure authority in a partition-tolerant way | REQUIRED |
| [Invocation] | Exercise authority that has been delegated through one or more delegates | REQUIRED |
| [Promise] | Await the result of an Invocation inside another Invocation | RECOMMENDED |
| [Revocation] | Undo a delegation, breaking a delegation chain for malicious users | RECOMMENDED |
```mermaid
flowchart TD
prm(Promise)
inv(Invocation)
del(Delegation)
rev(Revocation)
prm -->|awaits| inv
del -->|proves| inv
rev -.->|kind of| inv
rev -->|invalidates| del
click del href "https://github.com/ucan-wg/delegation" "UCAN Delegation Spec"
click inv href "https://github.com/ucan-wg/invocation" "UCAN Invocation Spec"
click rev href "https://github.com/ucan-wg/revocation" "UCAN Revocation Spec"
```
## Time
It is often useful to talk about a UCAN in the context of some action. For example, a UCAN delegation may be valid when it was created, but expired when invoked.
```mermaid
sequenceDiagram
Alice -->> Bob: Delegate
Bob ->> Bob: Validate
Bob -->> Carol: Delegate
Carol ->> Carol: Validate
Carol ->> Alice: Invoke
Alice ->> Alice: Validate
Alice ->> Alice: Execute
```
### Validity Interval
The period of time that a capability is valid from and until. This is the range from the latest "not before" to the earliest expiry in the UCAN delegation chain.
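A minimal sketch of computing this interval over a delegation chain, assuming each link's optional `nbf`/`exp` values have already been parsed into times:

```go
package ucan // illustrative sketch, not a normative implementation

import "time"

// Bounds holds one delegation's optional time bounds; nil means unbounded.
type Bounds struct {
	NotBefore *time.Time // nbf
	Expiry    *time.Time // exp
}

// ValidityInterval returns the latest "not before" and the earliest expiry
// across the chain. A nil result on either side means that side is unbounded.
func ValidityInterval(chain []Bounds) (nbf, exp *time.Time) {
	for _, b := range chain {
		if b.NotBefore != nil && (nbf == nil || b.NotBefore.After(*nbf)) {
			nbf = b.NotBefore
		}
		if b.Expiry != nil && (exp == nil || b.Expiry.Before(*exp)) {
			exp = b.Expiry
		}
	}
	return nbf, exp
}
```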
### Delegation-Time
The moment at which a delegation is asserted. This MAY be captured by an `iat` field, but is generally superfluous to capture in the token.
### Invocation-Time
The moment a UCAN Invocation is created. It MUST be within the Validity Interval.
### Validation-Time
Validation MAY occur at multiple points during a UCAN's lifecycle. The main two are:
- On receipt of a delegation
- When executing an invocation
### Execution-Time
To avoid the overloaded word "runtime", UCAN adopts the term "execution-time" to express the moment that the executor attempts to use the authority captured in an invocation and associated delegation chain. Validation MUST occur at this time.
## Time Bounds
`nbf` and `exp` stand for "not before" and "expires at," respectively. These MUST be expressed as seconds since the Unix epoch in UTC, without time zone or other offset. Taken together, they represent the time bounds for a token. These timestamps MUST be represented as the number of integer seconds since the Unix epoch. Due to limitations[^js-num-size] in numerics for certain common languages, timestamps outside of the range from $-2^{53} + 1$ to $2^{53} - 1$ MUST be rejected as invalid.
The `nbf` field is OPTIONAL. When omitted, the token MUST be treated as valid beginning from the Unix epoch. Setting the `nbf` field to a time in the future MUST delay invoking a UCAN. For example, pre-provisioning access to conference materials ahead of time but not allowing access until the day it starts is achievable with judicious use of `nbf`.
The `exp` field is RECOMMENDED. Following the [principle of least authority][PoLA], it is RECOMMENDED to give a timestamp expiry for UCANs. If the token explicitly never expires, the `exp` field MUST be set to `null`. If the time is in the past at validation time, the token MUST be treated as expired and invalid.
Keeping the window of validity as short as possible is RECOMMENDED. Limiting the time range can mitigate the risk of a malicious user abusing a UCAN. However, this is situationally dependent. It may be desirable to limit the frequency of forced reauthorizations for trusted devices. Due to clock drift, time bounds SHOULD NOT be considered exact. A buffer of ±60 seconds is RECOMMENDED.
Several named points of time in the UCAN lifecycle can be found in the [high level spec][UCAN].
Below are a couple examples:
```js
{
// ...
"nbf": 1529496683,
"exp": 1575606941
}
```
```js
{
// ...
"exp": 1575606941
}
```
```js
{
// ...
"nbf": 1529496683,
"exp": null
}
```
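Putting those rules together, a validator's time-bounds check might look like the following sketch (integer seconds, with the RECOMMENDED ±60-second drift buffer):

```go
package ucan // illustrative sketch, not a normative implementation

// maxTS is the largest timestamp representable as a 53-bit integer.
const maxTS = int64(1)<<53 - 1

// inRange rejects timestamps outside the 53-bit safe-integer range.
func inRange(ts int64) bool { return ts >= -maxTS && ts <= maxTS }

// timeBoundsValid reports whether now falls inside the token's bounds.
// nbf == nil means "valid since the Unix epoch"; exp == nil means the
// token explicitly never expires, matching the semantics above.
func timeBoundsValid(nbf, exp *int64, now int64) bool {
	const drift = 60 // RECOMMENDED clock-drift buffer in seconds
	if nbf != nil {
		if !inRange(*nbf) || now+drift < *nbf {
			return false
		}
	}
	if exp != nil {
		if !inRange(*exp) || now-drift > *exp {
			return false
		}
	}
	return true
}
```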
## Lifecycle Example
Here is a concrete example of all stages of the UCAN lifecycle for database write access.
```mermaid
sequenceDiagram
participant Database
actor DBAgent
actor Alice
actor Bob
Note over Database, DBAgent: Set Up Agent-Owned Resource
DBAgent ->> Database: createDB()
autonumber 1
Note over DBAgent, Bob: Delegation
DBAgent -->> Alice: delegate(DBAgent, write)
Alice -->> Bob: delegate(DBAgent, write)
Note over Database, Bob: Invocation
Bob ->> DBAgent: invoke(DBAgent, [write, [key, value]], proof: [➊,➋])
DBAgent ->> Database: write(key, value)
DBAgent ->> Bob: ACK
Note over DBAgent, Bob: Revocation
Alice ->> DBAgent: revoke(➋, proof: [➊,➋])
Bob ->> DBAgent: invoke(DBAgent, [write, [key, newValue]], proof: [➊,➋])
DBAgent -X Bob: NAK(➏) [rejected]
```
## Capability
A capability is the association of an ability to a subject: `subject x command x policy`.
The Subject and Command fields are REQUIRED. Any non-normative extensions are OPTIONAL.
For example, a capability may be used to represent the ability to send email from a certain address to others at `@example.com`.
| Field | Example |
| ------- | -------------------------------------------------------------------------------------------- |
| Subject | `did:key:z6MkhaXgBZDvotDkL5257faiztiGiC2QtKLGpbnnEGta2doK` |
| Command | `/msg/send` |
| Policy | `["or", ["==", ".from", "mailto:me@example.com"], ["match", ".cc", "mailto:*@example.com"]]` |
For a more complete treatment, please see the [UCAN Delegation][delegation] spec.
## Authority
> Whether to enable cooperation or to limit vulnerability, we care about _authority_ rather than _permissions._ Permissions determine what actions an individual program may perform on objects it can directly access. Authority describes the effects that a program may cause on objects it can access, either directly by permission, or indirectly by permitted interactions with other programs.
>
> —[Mark Miller], [Robust Composition]
The set of capabilities delegated by a UCAN is called its "authority." To frame it another way, it's the set of effects that a principal can cause, and acts as a declarative description of delegated abilities.
Merging capability authorities MUST follow set semantics, where the result includes all capabilities from the input authorities. Since broader capabilities automatically include narrower ones, this process is always additive. Capability authorities can be combined in any order, with the result always being at least as broad as each of the original authorities.
```plaintext
┌───────────────────────┐ ┐
│ │ │
│ │ │
│ │ │
│ │ │
│ Subject B │ │
┌──────────────────┼ ─ ─ x │ │
│ │ Ability Z │ ├── BxZ
│ │ │ │ Capability
│ │ │ │
│ │ │ │
│ Subject A │ │ │
│ x │ │ │
│ Ability Y ─ ─┼──────────────────┘ ┘
│ │
│ │
│ │
│ │
│ │
└───────────────────────┘
└─────────────────────┬────────────────────┘
AxY U BxZ
Capability
```
The capability authority is the total rights of the authorization space down to the relevant volume of authorizations. Individual capabilities MAY overlap; the authority is the union. Every unique delegated capability MUST have equal or narrower capabilities from their delegator. Inside this content space, you can draw a boundary around some resource(s) (their type, identifiers, and paths or children) and their capabilities.
## Command
Commands are concrete messages ("verbs") that MUST be unambiguously interpretable by the Subject of a UCAN. Commands are REQUIRED in invocations. Some examples include `/msg/send`, `/crud/read`, and `/ucan/revoke`.
Much like other message-passing systems, the specific resource MUST define the behavior for a particular message. For instance, `/crud/update` MAY be used to destructively update a database row, or append to an append-only log. Specific messages MAY be created at will; the only restriction is that the Executor understand how to interpret that message in the context of a specific resource.
While arbitrary semantics MAY be described, they MUST apply to the target resource. For instance, it does not make sense to apply `/msg/send` to a typical file system.
### Segment Structure
Commands MUST be lowercase, and begin with a slash (`/`). Segments MUST be separated by a slash. A trailing slash MUST NOT be present. All of the following are syntactically valid Commands:
- `/`
- `/crud`
- `/crud/create`
- `/stack/pop`
- `/crypto/sign`
- `/foo/bar/baz/qux/quux`
- `/ほげ/ふが`
Segment structure is important since shorter Commands prove longer paths. For example, `/` can be used as a proof of _any_ other Command, and `/crypto` MAY be used to prove `/crypto/sign` but MUST NOT prove `/stack/pop` or `/cryptocurrency`.
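Segment-aware prefix matching captures this rule; a small sketch:

```go
package ucan // illustrative sketch, not a normative implementation

import "strings"

// Proves reports whether proof (e.g. "/crypto") proves cmd (e.g. "/crypto/sign").
// "/" proves any Command; otherwise the proof must match cmd on whole
// segments, so "/crypto" proves "/crypto/sign" but not "/cryptocurrency".
func Proves(proof, cmd string) bool {
	if proof == "/" || proof == cmd {
		return true
	}
	return strings.HasPrefix(cmd, proof+"/")
}
```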
### `/` AKA "Top"
_"Top" (`/`) is the most powerful ability, and as such it SHOULD be handled with care and used sparingly._
The "top" (or "any", or "wildcard") ability MUST be denoted `/`. This can be thought of as something akin to a super user permission in RBAC.
The wildcard ability grants access to all other capabilities for the specified resource, across all possible namespaces. The wildcard ability is useful when "linking" agents by delegating all access to another device controlled by the same user, and that should behave as the same agent. It is extremely powerful, and should be used with care. Among other things, it permits the delegate to update a Subject's mutable DID document (change their private keys), revoke UCAN delegations, and use any resources delegated to the Subject by others.
```mermaid
%%{ init: { 'flowchart': { 'curve': 'linear' } } }%%
flowchart BT
/
/msg --> /
subgraph msgGraph [ ]
/msg/send --> /msg
/msg/receive --> /msg
end
/crud --> /
subgraph crudGraph [ ]
/crud/read --> /crud
/crud/mutate --> /crud
subgraph mutationGraph [ ]
/crud/mutate/create --> /crud/mutate
/crud/mutate/update --> /crud/mutate
/crud/mutate/destroy --> /crud/mutate
end
end
... --> /
```
### Reserved Commands
#### `/ucan` Namespace
The `/ucan` Command namespace MUST be reserved. This MUST include any ability string matching the regex `^ucan\/.*`. This is important for keeping a space for community-blessed Commands in the future, such as standard library Commands like [Revocation].
## Attenuation
Attenuation is the process of constraining the capabilities in a delegation chain. Each direct delegation MUST either directly restate or attenuate (diminish) its capabilities.
# Token Resolution
Token resolution is transport specific. The exact format is left to the relevant UCAN transport specification. At minimum, such a specification MUST define at least the following:
1. Request protocol
2. Response protocol
3. Collections format
Note that if an instance cannot dereference a CID at runtime, the UCAN MUST fail validation. This is consistent with the [constructive semantics] of UCAN.
# Nonce
The REQUIRED nonce parameter `nonce` MAY be any value. A randomly generated string is RECOMMENDED to provide a unique UCAN, though it MAY also be a monotonically increasing count of the number of links in the hash chain. This field helps prevent replay attacks and ensures a unique CID per delegation. The `iss`, `aud`, and `exp` fields together will often ensure that UCANs are unique, but adding the nonce ensures uniqueness.
The recommended size of the nonce differs by key type. In many cases, a random 12-byte nonce is sufficient. If uncertain, check the nonce in your DID's crypto suite.
This field SHOULD NOT be used to sign arbitrary data, such as signature challenges. See the [`meta`][Metadata] field for more.
Here is a simple example.
```js
{
// ...
"nonce": {"/": {"bytes": "bGlnaHQgd29yay4"}}
}
```
# Metadata
The OPTIONAL `meta` field contains a map of arbitrary metadata, facts, and proofs of knowledge. The enclosed data MUST be self-evident and externally verifiable. It MAY include information such as hash preimages, server challenges, a Merkle proof, dictionary data, etc.
The data contained in this map MUST NOT be semantically meaningful to delegation chains.
Below is an example:
```js
{
// ...
"meta": {
"challenges": {
"example.com": "abcdef",
"another.example.net": "12345"
},
"sha3_256": {
"B94D27B9934D3E08A52E52D7DA7DABFAC484EFE37A5380EE9088F7ACE2EFCDE9": "hello world"
}
}
}
```
# Canonicalization
## Cryptosuite
Across all UCAN specifications, the following cryptosuite MUST be supported:
| Role | REQUIRED Algorithms | Notes |
| --------- | --------------------------------- | ------------------------------------ |
| Hash | [SHA-256] | |
| Signature | [Ed25519], [P-256], [`secp256k1`] | Preference of Ed25519 is RECOMMENDED |
| [DID] | [`did:key`] | |
## Encoding
All UCANs MUST be canonically encoded with [DAG-CBOR] for signing. A UCAN MAY be presented or stored in other [IPLD] formats (such as [DAG-JSON]), but converted to DAG-CBOR for signature validation.
## Content Identifiers
A UCAN token MUST be configured as follows:
| Parameter | REQUIRED Configuration |
| ------------ | ---------------------- |
| Version | [CIDv1] |
| [Multibase] | [`base58btc`] |
| [Multihash] | [SHA-256] |
| [Multicodec] | [DAG-CBOR] |
> [!NOTE]
> All CIDs encoded as above start with the characters `zdpu`.
The resolution of these addresses is left to the implementation and end-user, and MAY (non-exclusively) include the following: local store, a distributed hash table (DHT), gossip network, or RESTful service.
## Envelope
All UCAN formats MUST use the following envelope format:
| Field | Type | Description |
| --------------------------------- | -------------- | -------------------------------------------------------------- |
| `.0` | `Bytes` | A signature by the Payload's `iss` over the `SigPayload` field |
| `.1` | `SigPayload` | The content that was signed |
| `.1.h` | `VarsigHeader` | The [Varsig] v1 header |
| `.1.ucan/<subspec-tag>@<version>` | `TokenPayload` | The UCAN token payload |
```mermaid
flowchart TD
subgraph Ucan ["UCAN Envelope"]
SignatureBytes["Signature (raw bytes)"]
subgraph SigPayload ["Signature Payload"]
VarsigHeader["Varsig Header"]
subgraph UcanPayload ["Token Payload"]
fields["..."]
end
end
end
```
For example:
```js
[
{
"/": {
bytes:
"7aEDQLYvb3lygk9yvAbk0OZD0q+iF9c3+wpZC4YlFThkiNShcVriobPFr/wl3akjM18VvIv/Zw2LtA4uUmB5m8PWEAU",
},
},
{
h: { "/": { bytes: "NBIFEgEAcQ" } },
"ucan/example@1.0.0-rc.1": {
hello: "world",
},
},
];
```
### Payload
A UCAN's Payload MUST contain at least the following fields:
| Field | Type | Required | Description |
| ------- | ----------------------------------------- | -------- | ----------------------------------------------------------- |
| `iss` | `DID` | Yes | Issuer DID (sender) |
| `aud` | `DID` | Yes | Audience DID (receiver) |
| `sub` | `DID` | Yes | Principal that the chain is about (the [Subject]) |
| `cmd` | `String` | Yes | The [Command] to eventually invoke |
| `args` | `{String : Any}` | Yes | Any [Arguments] that MUST be present in the Invocation |
| `nonce` | `Bytes` | Yes | Nonce |
| `meta` | `{String : Any}` | No | [Meta] (asserted, signed data) — is not delegated authority |
| `nbf` | `Integer` (53-bits[^js-num-size]) | No | "Not before" UTC Unix Timestamp in seconds (valid from) |
| `exp` | `Integer \| Null` (53-bits[^js-num-size]) | Yes | Expiration UTC Unix Timestamp in seconds (valid until) |
# Implementation Recommendations
## Delegation Store
A validator MAY keep a local store of UCANs that it has received. UCANs are immutable but also time-bound so that this store MAY evict expired or revoked UCANs.
This store SHOULD be indexed by CID (content addressing). Multiple indices built on top of this store MAY be used to improve capability search or selection performance.
## Memoized Validation
Aside from revocation, capability validation is idempotent. Marking a CID (or capability index inside that CID) as valid acts as memoization, obviating the need to check the entire structure on every validation. This extends to distinct UCANs that share a proof: if the proof was previously reviewed and is not revoked, it is RECOMMENDED to consider it valid immediately.
Revocation is irreversible. Suppose the validator learns of revocation by UCAN CID. In that case, the UCAN and all of its derivatives in such a cache MUST be marked as invalid, and all validations immediately fail without needing to walk the entire structure.
## Replay Attack Prevention
Replay attack prevention is REQUIRED. Every UCAN token MUST hash to a unique [CIDv1]. Some simple strategies for implementing uniqueness tracking include maintaining a set of previously seen CIDs, or requiring that nonces be monotonically increasing per principal. This MAY be the same structure as a validated UCAN memoization table (if one is implemented).
Maintaining a secondary token expiry index is RECOMMENDED. This enables garbage collection and more efficient search. In cases of very large stores, normal cache performance techniques MAY be used, such as Bloom filters, multi-level caches, and so on.
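One possible shape for such a uniqueness tracker, keeping seen CIDs alongside their expiries so the expiry index can drive garbage collection (a sketch, not a normative design):

```go
package ucan // illustrative sketch, not a normative implementation

import "sync"

// SeenStore tracks UCAN CIDs until their expiry so replays can be rejected.
type SeenStore struct {
	mu   sync.Mutex
	seen map[string]int64 // CID -> expiry (Unix seconds)
}

func NewSeenStore() *SeenStore {
	return &SeenStore{seen: map[string]int64{}}
}

// Check records cid and reports whether it was fresh (not seen before).
func (s *SeenStore) Check(cid string, exp int64) bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	if _, dup := s.seen[cid]; dup {
		return false
	}
	s.seen[cid] = exp
	return true
}

// GC evicts entries whose expiry has passed (a linear scan for brevity;
// a real store would use the secondary expiry index described above).
func (s *SeenStore) GC(now int64) {
	s.mu.Lock()
	defer s.mu.Unlock()
	for cid, exp := range s.seen {
		if exp < now {
			delete(s.seen, cid)
		}
	}
}
```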
## Beyond Single System Image
> As we continue to increase the number of globally connected devices, we must embrace a design that considers every single member in the system as the primary site for the data that it generates. It is completely impractical that we can look at a single, or a small number, of globally distributed data centers as the primary site for all global information that we desire to perform computations with.
>
> —[Meiklejohn], [A Certain Tendency Of The Database Community]
Unlike many authorization systems where a service controls access to resources in their care, location-independent, offline, and leaderless resources require control to live with the user. Therefore, the same data MAY be used across many applications, data stores, and users. Since they don't have a single location, applying UCAN to [RSM]s and [CRDT]s MAY be modelled by lifting the requirement that the Executor be the Subject.
Ultimately this comes down to a question of push vs pull. In push, the subject MUST be the specific site being pushed to ("I command you to apply the following updates to your state").
Pull is the broad class of situations where an Invoker doesn't require that a particular replica apply its state. Applying a change to a local CRDT replica and maintaining a UCAN invocation log is a valid update to "the CRDT": a version of the CRDT Subject exists locally even if the Subject's private key is not present. Gossiping these changes among agents allows each to apply changes that it becomes aware of. Thanks to the invocation log (or equivalent integrated directly into the CRDT), provenance of authority is made transparent.
```mermaid
sequenceDiagram
participant CRDT as Initial Grow-Only Set (CRDT)
actor Alice
actor Bob
actor Carol
autonumber
Note over CRDT, Bob: Setup
CRDT -->> Alice: delegate(CRDT_ID, merge)
CRDT -->> Bob: delegate(CRDT_ID, merge)
Note over Bob, Carol: Bob Invites Carol
Bob -->> Carol: delegate(CRDT_ID, merge)
Note over Alice, Carol: Direct P2P Gossip
Carol ->> Bob: invoke(CRDT_ID, merge, {"Carrot"}, proof: [➋,❸])
    Alice ->> Carol: invoke(CRDT_ID, merge, {"Apple"}, proof: [➊])
Bob ->> Alice: invoke(CRDT_ID, merge, {"Banana", "Carrot"}, proof: [➋])
```
## Wrapping Existing Systems
In the RECOMMENDED scenario, the agent controlling a resource has a unique reference to it. This is always possible in a system that has adopted capabilities end-to-end.
Interacting with existing systems MAY require relying on ambient authority contained in an ACL, non-unique reference, or other authorization logic. These cases are still compatible with UCAN, but the security guarantees are weaker since 1. the surface area is larger, and 2. part of the auth system lives outside UCAN.
```mermaid
sequenceDiagram
participant Database
participant ACL as External Auth System
actor DBAgent
actor Alice
actor Bob
Note over ACL, DBAgent: Setup
DBAgent ->> ACL: signup(DBAgent)
ACL ->> ACL: register(DBAgent)
autonumber 1
Note over DBAgent, Bob: Delegation
DBAgent -->> Alice: delegate(DBAgent, write)
Alice -->> Bob: delegate(DBAgent, write)
Note over Database, Bob: Invocation
Bob ->>+ DBAgent: invoke(DBAgent, [write, key, value], proof: [➊,➋])
critical External System
DBAgent ->> ACL: getToken(write, key, AuthGrant)
ACL ->> DBAgent: AccessToken
DBAgent ->> Database: request(write, value, AccessToken)
Database ->> DBAgent: ACK
end
DBAgent ->>- Bob: ACK
```
# FAQ
## What prevents an unauthorized party from using an intercepted UCAN?
UCANs always contain information about the sender and receiver. A UCAN is signed by the sender (the `iss` field DID) and can only be created by an agent in possession of the relevant private key. The recipient (the `aud` field DID) is required to check that the field matches their DID. These two checks together secure the certificate against use by an unauthorized party. [UCAN Invocations][invocation] prevent use by an unauthorized party by signing over a request to use the capability granted in a delegation chain.
## What prevents replay attacks on the invocation use case?
All UCAN Invocations MUST have a unique CID. The executing agent MUST check this CID for uniqueness against a local store of unexpired UCAN hashes.
This is not a concern when simply delegating since receiving a delegation is idempotent.
## Is UCAN secure against person-in-the-middle attacks?
_UCAN does not have any special protection against person-in-the-middle (PITM) attacks._
If a PITM attack was successfully performed on a UCAN delegation, the proof chain would contain the attacker's DID(s). It is possible to detect this scenario and revoke the relevant UCAN but this does require special inspection of the topmost `iss` field to check if it is the expected DID. Therefore, it is strongly RECOMMENDED to only delegate UCANs to agents that are both trusted and authenticated and over secure channels.
## Can my implementation support more cryptographic algorithms?
It is possible to use other algorithms, but doing so limits interoperability with the broader UCAN ecosystem. This is thus considered "off spec" (i.e. non-interoperable). If you choose to extend UCAN with additional algorithms, you MUST include this metadata in the (self-describing) [Varsig] header.
# Related Work and Prior Art
[SPKI/SDSI] is closely related to UCAN. A different encoding format is used, and some details vary (such as a delegation-locking bit), but the core idea and general usage pattern are very close. UCAN can be seen as making these ideas more palatable to a modern audience and adding a few features such as content IDs that were less widespread at the time SPKI/SDSI were written.
[ZCAP-LD] is closely related to UCAN. The primary differences are in formatting, addressing by URL instead of CID, the mechanism of separating invocation from authorization, and single versus multiple proofs.
[CACAO] is a translation of many of these ideas to a cross-blockchain delegated bearer token model. It contains the same basic concepts as UCAN delegation, but is aimed at small messages and identities that are rooted in mutable documents rooted on a blockchain and lacks the ability to subdelegate capabilities.
[Local-First Auth] is a non-certificate-based approach, instead relying on a CRDT to build up a list of group members, devices, and roles. It has a friendly invitation mechanism based on a [Seitan token exchange]. It is also straightforward to see which users have access to what, avoiding the confinement problem seen in many decentralized auth systems.
[Macaroon] is a MAC-based capability and cookie system aimed at distributing authority across services in a trusted network (typically in the context of a Cloud). By not relying on asymmetric signatures, Macaroons achieve excellent space savings and performance, given that the MAC can be checked against the relevant services during discharge. The authority is rooted in an originating server rather than with an end-user.
[Biscuit] uses Datalog to describe capabilities. It has a specialized format but is otherwise in line with UCAN.
[Verifiable credentials] are a solution for data about people or organizations. However, they are aimed at a related-but-distinct problem: asserting attributes about the holder of a DID, including things like work history, age, and membership.
# Acknowledgments
Thank you to [Brendan O'Brien] for real-world feedback, technical collaboration, and implementing the first Golang UCAN library.
Thank you [Blaine Cook] for the real-world feedback, ideas on future features, and lessons from other auth standards.
Many thanks to [Hugo Dias], [Mikael Rogers], and the entire DAG House team for the real world feedback, and finding inventive new use cases.
Thanks to [Hannah Howard] and [Alan Shaw] at [Storacha] for their team's feedback from real world use cases.
Many thanks to [Brian Ginsburg] and [Steven Vandevelde] for their many copy edits, feedback from real world usage, maintenance of the TypeScript implementation, and tools such as [ucan.xyz].
Many thanks to [Christopher Joel] for his real-world feedback, raising many pragmatic considerations, and the Rust implementation and related crates.
Many thanks to [Christine Lemmer-Webber] for her handwritten(!) feedback on the design of UCAN, spearheading the [OCapN] initiative, and her related work on [ZCAP-LD].
Many thanks to [Alan Karp] for sharing his vast experience with capability-based authorization, patterns, and many right words for us to search for.
Thanks to [Benjamin Goering] for the many community threads and connections to [W3C] standards.
Thanks to [Juan Caballero] for the numerous questions, clarifications, and general advice on putting together a comprehensible spec.
Thank you [Dan Finlay] for being sufficiently passionate about [OCAP] that we realized that capability systems had a real chance of adoption in an ACL-dominated world.
Thanks to [Peter van Hardenberg][PvH] and [Martin Kleppmann] of [Ink & Switch] for conversations exploring options for access control on CRDTs and [local-first] applications.
Thanks to the entire [SPKI WG][SPKI/SDSI] for their closely related pioneering work.
We want to especially recognize [Mark Miller] for his numerous contributions to the field of distributed auth, programming languages, and networked security writ large.
<!-- Footnotes -->
[^js-num-size]: JavaScript has a single numeric type ([`Number`][JS Number]) for both integers and floats. This representation is defined as a [IEEE-754] double-precision floating point number, which has a 53-bit significand.
[^pcec]: To be precise, this is a [PC/EC][PACELC] system, which is a critical trade-off for many systems. UCAN can be used to model both PC/EC and PA/EL, but is most typically PC/EL.
<!-- Internal Links -->
[Command]: #command
[Cryptosuite]: #cryptosuite
[overcoming SSI]: #beyond-single-system-image
[sub-specifications]: #sub-specifications
[wrapping existing systems]: #wrapping-existing-systems
<!-- External Links -->
[IEEE-754]: https://ieeexplore.ieee.org/document/8766229
[A Certain Tendency Of The Database Community]: https://arxiv.org/pdf/1510.08473.pdf
[ACL]: https://en.wikipedia.org/wiki/Access-control_list
[Alan Karp]: https://github.com/alanhkarp
[Alan Kay]: https://en.wikipedia.org/wiki/Alan_Kay
[Alan Shaw]: https://github.com/alanshaw
[BCP 14]: https://www.rfc-editor.org/info/bcp14
[BLAKE3]: https://github.com/BLAKE3-team/BLAKE3
[Benjamin Goering]: https://github.com/gobengo
[Biscuit]: https://github.com/biscuit-auth/biscuit/
[Blaine Cook]: https://github.com/blaine
[Bluesky]: https://blueskyweb.xyz/
[Brendan O'Brien]: https://github.com/b5
[Brian Ginsburg]: https://github.com/bgins
[Brooklyn Zelenka]: https://github.com/expede
[CACAO]: https://blog.ceramic.network/capability-based-data-security-on-ceramic/
[CIDv1]: https://docs.ipfs.io/concepts/content-addressing/#identifier-formats
[CRDT]: https://en.wikipedia.org/wiki/Conflict-free_replicated_data_type
[Capability Myths Demolished]: https://srl.cs.jhu.edu/pubs/SRL2003-02.pdf
[Christine Lemmer-Webber]: https://github.com/cwebber
[Christopher Joel]: https://github.com/cdata
[Code Mesh 2016]: https://www.codemesh.io/codemesh2016
[DAG-CBOR]: https://ipld.io/specs/codecs/dag-cbor/spec/
[DAG-JSON]: https://ipld.io/specs/codecs/dag-json/spec/
[DID fragment]: https://www.w3.org/TR/did-core/#fragment
[DID path]: https://www.w3.org/TR/did-core/#path
[DID subject]: https://www.w3.org/TR/did-core/#dfn-did-subjects
[DID]: https://www.w3.org/TR/did-core/
[Dan Finlay]: https://github.com/danfinlay
[Daniel Holmgren]: https://github.com/dholms
[ECDSA security]: https://en.wikipedia.org/wiki/Elliptic_Curve_Digital_Signature_Algorithm#Security
[Ed25519]: https://en.wikipedia.org/wiki/EdDSA#Ed25519
[EdDSA]: https://datatracker.ietf.org/doc/html/rfc8032#section-5.1
[Email about SPKI]: https://web.archive.org/web/20140724054706/http://wiki.erights.org/wiki/Capability-based_Active_Invocation_Certificates
[FIDO]: https://fidoalliance.org/what-is-fido/
[Fission]: https://fission.codes
[GUID]: https://en.wikipedia.org/wiki/Universally_unique_identifier
[Hannah Howard]: https://github.com/hannahhoward
[Hugo Dias]: https://github.com/hugomrdias
[IPLD]: https://ipld.io/
[Ink & Switch]: https://www.inkandswitch.com/
[Inversion of control]: https://en.wikipedia.org/wiki/Inversion_of_control
[Irakli Gozalishvili]: https://github.com/Gozala
[JWT]: https://www.rfc-editor.org/rfc/rfc7519
[Joe Armstrong]: https://en.wikipedia.org/wiki/Joe_Armstrong_(programmer)
[Juan Caballero]: https://github.com/bumblefudge
[Local-First Auth]: https://github.com/local-first-web/auth
[Macaroon]: https://storage.googleapis.com/pub-tools-public-publication-data/pdf/41892.pdf
[Mark Miller]: https://github.com/erights
[Martin Kleppmann]: https://martin.kleppmann.com/
[Meiklejohn]: https://christophermeiklejohn.com/
[Mikael Rogers]: https://github.com/mikeal/
[Multibase]: https://github.com/multiformats/multibase
[Multicodec]: https://github.com/multiformats/multicodec
[Multics]: https://en.wikipedia.org/wiki/Multics
[Multihash]: https://www.multiformats.io/multihash/
[OCAP]: http://erights.org/elib/capability/index.html
[OCapN]: https://github.com/ocapn/ocapn
[P-256]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-4.pdf#page=111
[PACELC]: https://en.wikipedia.org/wiki/PACELC_theorem
[Philipp Krüger]: https://github.com/matheus23
[PoLA]: https://en.wikipedia.org/wiki/Principle_of_least_privilege
[Protocol Labs]: https://protocol.ai/
[PvH]: https://www.pvh.ca
[RBAC]: https://en.wikipedia.org/wiki/Role-based_access_control
[RFC 2119]: https://datatracker.ietf.org/doc/html/rfc2119
[RFC 3339]: https://www.rfc-editor.org/rfc/rfc3339
[RFC 8037]: https://datatracker.ietf.org/doc/html/rfc8037
[RSM]: https://en.wikipedia.org/wiki/State_machine_replication
[Robust Composition]: http://www.erights.org/talks/thesis/markm-thesis.pdf
[SHA-256]: https://en.wikipedia.org/wiki/SHA-2
[SPKI/SDSI]: https://datatracker.ietf.org/wg/spki/about/
[SPKI]: https://theworld.com/~cme/html/spki.html
[Seitan token exchange]: https://book.keybase.io/docs/teams/seitan
[Steven Vandevelde]: https://github.com/icidasset
[Storacha]: https://storacha.network/
[The Structure of Authority]: http://erights.org/talks/no-sep/secnotsep.pdf
[The computer revolution hasn't happened yet]: https://www.youtube.com/watch?v=oKg1hTOQXoY
[UCAN Promise]: https://github.com/ucan-wg/promise
[URI]: https://www.rfc-editor.org/rfc/rfc3986
[Varsig]: https://github.com/ChainAgnostic/varsig
[Verifiable credentials]: https://www.w3.org/2017/vc/WG/
[W3C]: https://www.w3.org/
[WebCrypto API]: https://developer.mozilla.org/en-US/docs/Web/API/Web_Crypto_API
[Witchcraft Software]: https://github.com/expede
[ZCAP-LD]: https://w3c-ccg.github.io/zcap-spec/
[`base58btc`]: https://github.com/multiformats/multibase/blob/master/multibase.csv#L21
[`did:key`]: https://w3c-ccg.github.io/did-method-key/
[`secp256k1`]: https://en.bitcoin.it/wiki/Secp256k1
[browser api crypto key]: https://developer.mozilla.org/en-US/docs/Web/API/CryptoKey
[capabilities]: https://en.wikipedia.org/wiki/Object-capability_model
[caps as keys]: http://www.erights.org/elib/capability/duals/myths.html#caps-as-keys
[certificate capability model]: https://web.archive.org/web/20140724054706/http://wiki.erights.org/wiki/Capability-based_Active_Invocation_Certificates
[confinement]: http://www.erights.org/elib/capability/dist-confine.html
[confused deputy problem]: https://en.wikipedia.org/wiki/Confused_deputy_problem
[constructive semantics]: https://en.wikipedia.org/wiki/Intuitionistic_logic
[content addressable storage]: https://en.wikipedia.org/wiki/Content-addressable_storage
[content addressing]: https://en.wikipedia.org/wiki/Content-addressable_storage
[dag-json multicodec]: https://github.com/multiformats/multicodec/blob/master/table.csv#L104
[delegation]: https://github.com/ucan-wg/delegation
[fail-safe]: https://en.wikipedia.org/wiki/Fail-safe
[invocation]: https://github.com/ucan-wg/invocation
[local-first]: https://www.inkandswitch.com/local-first/
[number zero]: https://n0.computer/
[passkey]: https://www.passkeys.com/
[promise]: https://github.com/ucan-wg/promise
[raw data multicodec]: https://github.com/multiformats/multicodec/blob/a03169371c0a4aec0083febc996c38c3846a0914/table.csv?plain=1#L41
[revocation]: https://github.com/ucan-wg/revocation
[secure hardware enclave]: https://support.apple.com/en-ca/guide/security/sec59b0b31ff
[spki rfc]: https://www.rfc-editor.org/rfc/rfc2693.html
[time definition]: https://en.wikipedia.org/wiki/Temporal_database
[trustless]: https://blueskyweb.xyz/blog/3-6-2022-a-self-authenticating-social-protocol
[ucan.xyz]: https://ucan.xyz

# Protocol Buffers in Cosmos SDK
## Overview
The Cosmos SDK uses Protocol Buffers for serialization and API definitions. Generation is handled via a Docker image: `ghcr.io/cosmos/proto-builder:0.15.x`.
## Generation Tools
- **Buf**: Primary tool for protobuf management
- **protocgen.sh**: Core generation script in `scripts/`
- **Makefile Commands**: Standard commands for generate, lint, format
## Key Components
### Buf Configuration
1. **Workspace Setup**
- Root level buf workspace configuration
- Manages multiple protobuf directories
2. **Directory Structure**
```
proto/
├── buf.gen.gogo.yaml # GoGo Protobuf generation
├── buf.gen.pulsar.yaml # Pulsar API generation
├── buf.gen.swagger.yaml # OpenAPI/Swagger docs
├── buf.lock # Dependencies
├── buf.yaml # Core configuration
├── cosmos/ # Core protos
└── tendermint/ # Consensus protos
```
3. **Module Protos**
- Located in `x/{moduleName}/proto`
- Module-specific message definitions
#### `buf.gen.gogo.yaml`
`buf.gen.gogo.yaml` defines how the protobuf files should be generated for use within the module. This file uses [gogoproto](https://github.com/gogo/protobuf), a separate generator from the google go-proto generator that makes working with various objects more ergonomic and has more performant encode and decode steps.
```go reference
https://github.com/cosmos/cosmos-sdk/blob/main/proto/buf.gen.gogo.yaml#L1-L9
```
#### `buf.gen.pulsar.yaml`
`buf.gen.pulsar.yaml` defines how protobuf files should be generated using the [new golang apiv2 of protobuf](https://go.dev/blog/protobuf-apiv2). This generator is used instead of the google go-proto generator because it has some extra helpers for Cosmos SDK applications and will have more performant encode and decode than the google go-proto generator. You can follow the development of this generator [here](https://github.com/cosmos/cosmos-proto).
```go reference
https://github.com/cosmos/cosmos-sdk/blob/main/proto/buf.gen.pulsar.yaml#L1-L18
```
#### `buf.gen.swagger.yaml`
`buf.gen.swagger.yaml` generates the swagger documentation for the queries and messages of the chain. This will only define the REST API endpoints that were defined in the query and msg servers. You can find examples of this [here](https://github.com/cosmos/cosmos-sdk/blob/main/x/bank/proto/cosmos/bank/v1beta1/query.proto).
```go reference
https://github.com/cosmos/cosmos-sdk/blob/main/proto/buf.gen.swagger.yaml#L1-L6
```
#### `buf.lock`
This is an autogenerated file based on the dependencies required by the `.gen` files. There is no need to copy the current one. If you depend on cosmos-sdk proto definitions, a new entry for the Cosmos SDK will need to be provided. The dependency you will need to use is `buf.build/cosmos/cosmos-sdk`.
```go reference
https://github.com/cosmos/cosmos-sdk/blob/main/proto/buf.lock#L1-L16
```
#### `buf.yaml`
`buf.yaml` defines the [name of your package](https://github.com/cosmos/cosmos-sdk/blob/main/proto/buf.yaml#L3), which [breakage checker](https://buf.build/docs/tutorials/getting-started-with-buf-cli#detect-breaking-changes) to use and how to [lint your protobuf files](https://buf.build/docs/tutorials/getting-started-with-buf-cli#lint-your-api).
It is advised to use a tagged version of the buf modules corresponding to the version of the Cosmos SDK being used.
```go reference
https://github.com/cosmos/cosmos-sdk/blob/main/proto/buf.yaml#L1-L24
```
We use a variety of linters for the Cosmos SDK protobuf files. The repo also checks this in CI.
A reference to the GitHub Actions workflow can be found [here](https://github.com/cosmos/cosmos-sdk/blob/main/.github/workflows/proto.yml#L1-L32)
# ORM
The Cosmos SDK ORM is a state management library that provides a rich, but opinionated set of tools for managing a
module's state. It provides support for:
- type safe management of state
- multipart keys
- secondary indexes
- unique indexes
- easy prefix and range queries
- automatic genesis import/export
- automatic query services for clients, including support for light client proofs (still in development)
- indexing state data in external databases (still in development)
## Design and Philosophy
The ORM's data model is inspired by the relational data model found in SQL databases. The core abstraction is a table
with a primary key and optional secondary indexes.
Because the Cosmos SDK uses protobuf as its encoding layer, ORM tables are defined directly in .proto files using
protobuf options. Each table is defined by a single protobuf `message` type and a schema of multiple tables is
represented by a single .proto file.
Table structure is specified in the same file where messages are defined in order to make it easy to focus on better
design of the state layer. Because blockchain state layout is part of the public API for clients (TODO: link to docs on
light client proofs), it is important to think about the state layout as being part of the public API of a module.
Changing the state layout actually breaks clients, so it is ideal to think through it carefully up front and to aim for
a design that will eliminate or minimize breaking changes down the road. Also, good design of state enables building
more performant and sophisticated applications. Providing users with a set of tools inspired by relational databases
which have a long history of database design best practices and allowing schema to be specified declaratively in a
single place are design choices the ORM makes to enable better design and more durable APIs.
Also, by only supporting the table abstraction as opposed to key-value pair maps, it is easy to add new
columns/fields to any data structure without causing a breaking change, and the data structures can easily be indexed in
any off-the-shelf SQL database for more sophisticated queries.
The encoding of fields in keys is designed to support ordered iteration for all protobuf primitive field types
except for `bytes` as well as the well-known types `google.protobuf.Timestamp` and `google.protobuf.Duration`. Encodings
are optimized for storage space when it makes sense (see the documentation in `cosmos/orm/v1/orm.proto` for more details)
and table rows do not use extra storage space to store key fields in the value.
We recommend that users of the ORM attempt to follow database design best practices such as
[normalization](https://en.wikipedia.org/wiki/Database_normalization) (at least 1NF).
For instance, defining `repeated` fields in a table is considered an anti-pattern because it breaks first normal form (1NF).
Although we support `repeated` fields in tables, they cannot be used as key fields for this reason. This may seem
restrictive, but years of best practice (and also experience in the SDK) have shown that following this pattern
leads to easier-to-maintain schemas.
To illustrate the motivation for these principles with an example from the SDK, historically balances were stored
as a mapping from account -> map of denom to amount. This did not scale well because an account with 100 token balances
needed to be encoded/decoded every time a single coin balance changed. Now balances are stored as account,denom -> amount
as in the `Balance` example below. With the ORM's data model, if we wanted to add a new field to `Balance` such as
`unlocked_balance` (if vesting accounts were redesigned in this way), it would be easy to add it to this table without
requiring a data migration. Because of the ORM's optimizations, the account and denom are only stored in the key part
of storage and not in the value leading to both a flexible data model and efficient usage of storage.
## Defining Tables
To define a table:
1. create a .proto file to describe the module's state (naming it `state.proto` is recommended for consistency),
and import "cosmos/orm/v1/orm.proto", ex:
```protobuf
syntax = "proto3";
package bank_example;
import "cosmos/orm/v1/orm.proto";
```
2. define a `message` for the table, ex:
```protobuf
message Balance {
bytes account = 1;
string denom = 2;
uint64 amount = 3;
}
```
3. add the `cosmos.orm.v1.table` option to the table and give the table an `id` unique within this .proto file:
```protobuf
message Balance {
option (cosmos.orm.v1.table) = {
id: 1
};
bytes account = 1;
string denom = 2;
uint64 amount = 3;
}
```
4. define the primary key field or fields, as a comma-separated list of the fields from the message which should make
up the primary key:
```protobuf
message Balance {
option (cosmos.orm.v1.table) = {
id: 1
primary_key: { fields: "account,denom" }
};
bytes account = 1;
string denom = 2;
uint64 amount = 3;
}
```
5. add any desired secondary indexes by specifying an `id` unique within the table and a comma-separated list of the
index fields:
```protobuf
message Balance {
option (cosmos.orm.v1.table) = {
id: 1;
primary_key: { fields: "account,denom" }
index: { id: 1 fields: "denom" } // this allows querying for the accounts which own a denom
};
bytes account = 1;
string denom = 2;
uint64 amount = 3;
}
```
### Auto-incrementing Primary Keys
A common pattern in SDK modules and in database design is to define tables with a single integer `id` field with an
automatically generated primary key. In the ORM we can do this by setting the `auto_increment` option to `true` on the
primary key, ex:
```protobuf
message Account {
option (cosmos.orm.v1.table) = {
id: 2;
primary_key: { fields: "id", auto_increment: true }
};
uint64 id = 1;
bytes address = 2;
}
```
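Although not shown above, the generated code for an auto-increment table can assign the id at insert time. A minimal sketch, assuming the `StateStore`/`AccountTable` accessors described later in this document and an `InsertReturningId`-style method on the generated table (the helper below is illustrative):
```go
// createAccount leaves Id unset (zero); the ORM assigns the next value
// in the sequence and returns it.
func createAccount(ctx context.Context, db StateStore, addr []byte) (uint64, error) {
	return db.AccountTable().InsertReturningId(ctx, &Account{Address: addr})
}
```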
### Unique Indexes
A unique index can be added by setting the `unique` option to `true` on an index, ex:
```protobuf
message Account {
option (cosmos.orm.v1.table) = {
id: 2;
primary_key: { fields: "id", auto_increment: true }
index: {id: 1, fields: "address", unique: true}
};
uint64 id = 1;
bytes address = 2;
}
```
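Unique indexes also get dedicated lookup methods in the generated code. A hedged sketch, assuming the generated `AccountTable` exposes a `GetByAddress` accessor for the unique `address` index (the helper below is illustrative):
```go
// accountByAddress resolves the unique address index directly to one row,
// returning an ormerrors.IsNotFound()-matching error if no row exists.
func accountByAddress(ctx context.Context, db StateStore, addr []byte) (*Account, error) {
	return db.AccountTable().GetByAddress(ctx, addr)
}
```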
### Singletons
The ORM also supports a special type of table with only one row called a `singleton`. This can be used for storing
module parameters. Singletons only need to define a unique `id` that cannot conflict with the ids of other
tables or singletons in the same .proto file. Ex:
```protobuf
message Params {
option (cosmos.orm.v1.singleton) = {
id: 3;
};
google.protobuf.Duration voting_period = 1;
uint64 min_threshold = 2;
}
```
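Singletons are read and written like one-row, keyless tables. A minimal sketch, assuming the generated `ParamsTable` follows the same `Get`/`Save` conventions as regular tables, just without key arguments:
```go
// bumpThreshold reads the single Params row, mutates it, and writes it back.
func bumpThreshold(ctx context.Context, db StateStore) error {
	params, err := db.ParamsTable().Get(ctx)
	if err != nil {
		return err
	}
	params.MinThreshold++
	return db.ParamsTable().Save(ctx, params)
}
```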
## Running Codegen
NOTE: the ORM will only work with protobuf code that implements the [google.golang.org/protobuf](https://pkg.go.dev/google.golang.org/protobuf)
API. That means it will not work with code generated using gogo-proto.
To install the ORM's code generator, run:
```shell
go install cosmossdk.io/orm/cmd/protoc-gen-go-cosmos-orm@latest
```
The recommended way to run the code generator is to use [buf build](https://docs.buf.build/build/usage).
This is an example `buf.gen.yaml` that runs `protoc-gen-go`, `protoc-gen-go-grpc` and `protoc-gen-go-cosmos-orm`
using buf managed mode:
```yaml
version: v1
managed:
enabled: true
go_package_prefix:
default: foo.bar/api # the go package prefix of your package
override:
buf.build/cosmos/cosmos-sdk: cosmossdk.io/api # required to import the Cosmos SDK api module
plugins:
- name: go
out: .
opt: paths=source_relative
- name: go-grpc
out: .
opt: paths=source_relative
- name: go-cosmos-orm
out: .
opt: paths=source_relative
```
## Using the ORM in a module
### Initialization
To use the ORM in a module, first create a `ModuleSchemaDescriptor`. This tells the ORM which .proto files have defined
an ORM schema and assigns them all a unique non-zero id. Ex:
```go
var MyModuleSchema = &ormv1alpha1.ModuleSchemaDescriptor{
SchemaFile: []*ormv1alpha1.ModuleSchemaDescriptor_FileEntry{
{
Id: 1,
ProtoFileName: mymodule.File_my_module_state_proto.Path(),
},
},
}
```
In the ORM-generated code for a file named `state.proto`, there should be an interface `StateStore`, generated
with a constructor `NewStateStore` that takes a parameter of type `ormdb.ModuleDB`. Add a reference to `StateStore`
to your module's keeper struct. Ex:
```go
type Keeper struct {
db StateStore
}
```
Then create the `StateStore` instance via an `ormdb.ModuleDB` that is instantiated from the `ModuleSchemaDescriptor`
above and one or more store services from `cosmossdk.io/core/store`. Ex:
```go
func NewKeeper(storeService store.KVStoreService) (*Keeper, error) {
modDb, err := ormdb.NewModuleDB(MyModuleSchema, ormdb.ModuleDBOptions{KVStoreService: storeService})
if err != nil {
return nil, err
}
db, err := NewStateStore(modDb)
if err != nil {
return nil, err
}
return &Keeper{db: db}, nil
}
```
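The automatic genesis import/export listed among the ORM's features is driven by `ormdb.ModuleDB` itself. A hedged sketch, assuming the keeper also retains the `modDb` handle and that the `ormjson` helpers (`NewRawMessageTarget`/`NewRawMessageSource`, as used by `x/group`) are available:
```go
// ExportGenesis writes every table in the module schema to raw JSON.
func (k Keeper) ExportGenesis(ctx context.Context) (json.RawMessage, error) {
	target := ormjson.NewRawMessageTarget()
	if err := k.modDb.ExportJSON(ctx, target); err != nil {
		return nil, err
	}
	return target.JSON()
}

// InitGenesis restores every table in the module schema from raw JSON.
func (k Keeper) InitGenesis(ctx context.Context, data json.RawMessage) error {
	source, err := ormjson.NewRawMessageSource(data)
	if err != nil {
		return err
	}
	return k.modDb.ImportJSON(ctx, source)
}
```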
### Using the generated code
The generated code for the ORM contains methods for inserting, updating, deleting and querying table entries.
For each table in a .proto file, there is a type-safe table interface implemented in generated code. For instance,
for a table named `Balance` there should be a `BalanceTable` interface that looks like this:
```go
type BalanceTable interface {
Insert(ctx context.Context, balance *Balance) error
Update(ctx context.Context, balance *Balance) error
Save(ctx context.Context, balance *Balance) error
Delete(ctx context.Context, balance *Balance) error
Has(ctx context.Context, account []byte, denom string) (found bool, err error)
// Get returns nil and an error which responds true to ormerrors.IsNotFound() if the record was not found.
Get(ctx context.Context, account []byte, denom string) (*Balance, error)
List(ctx context.Context, prefixKey BalanceIndexKey, opts ...ormlist.Option) (BalanceIterator, error)
ListRange(ctx context.Context, from, to BalanceIndexKey, opts ...ormlist.Option) (BalanceIterator, error)
DeleteBy(ctx context.Context, prefixKey BalanceIndexKey) error
DeleteRange(ctx context.Context, from, to BalanceIndexKey) error
doNotImplement()
}
```
This `BalanceTable` should be accessible from the `StateStore` interface (assuming our file is named `state.proto`)
via a `BalanceTable()` accessor method. If all the above example tables/singletons were in the same `state.proto`,
then `StateStore` would get generated like this:
```go
type StateStore interface {
BalanceTable() BalanceTable
AccountTable() AccountTable
ParamsTable() ParamsTable
doNotImplement()
}
```
So to work with the `BalanceTable` in a keeper method we could use code like this:
```go
func (k keeper) AddBalance(ctx context.Context, acct []byte, denom string, amount uint64) error {
balance, err := k.db.BalanceTable().Get(ctx, acct, denom)
if err != nil && !ormerrors.IsNotFound(err) {
return err
}
if balance == nil {
balance = &Balance{
Account: acct,
Denom: denom,
Amount: amount,
}
} else {
balance.Amount = balance.Amount + amount
}
return k.db.BalanceTable().Save(ctx, balance)
}
```
`List` methods take `IndexKey` parameters. For instance, `BalanceTable.List` takes `BalanceIndexKey`. `BalanceIndexKey`
lets us represent index keys for the different indexes (primary and secondary) on the `Balance` table. The primary key
in the `Balance` table gets a struct `BalanceAccountDenomIndexKey` and the first index gets an index key `BalanceDenomIndexKey`.
If we wanted to list all the denoms and amounts that an account holds, we would use `BalanceAccountDenomIndexKey`
with a `List` query just on the account prefix. Ex:
```go
it, err := keeper.db.BalanceTable().List(ctx, BalanceAccountDenomIndexKey{}.WithAccount(acct))
```
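Iterators returned by `List`/`ListRange` must be consumed and closed. Expanding the call above into a full read loop, here is a hedged sketch assuming the generated `BalanceIterator` follows the ORM's usual `Next`/`Value`/`Close` pattern:
```go
func (k keeper) AllBalances(ctx context.Context, acct []byte) error {
	it, err := k.db.BalanceTable().List(ctx, BalanceAccountDenomIndexKey{}.WithAccount(acct))
	if err != nil {
		return err
	}
	defer it.Close()
	for it.Next() {
		balance, err := it.Value()
		if err != nil {
			return err
		}
		// Each row is one (account, denom) pair together with its amount.
		fmt.Printf("%s: %d\n", balance.Denom, balance.Amount)
	}
	return nil
}
```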
---
# ProtocolBuffer Annotations
This document explains the various protobuf scalars that have been added to make working with protobuf easier for Cosmos SDK application developers.
## Signer
Signer specifies which field should be used to determine the signer of a message for the Cosmos SDK. Clients can also use this field to infer which field identifies the signer of a message.
Read more about the signer field [here](./02-messages-and-queries.md).
```protobuf reference
https://github.com/cosmos/cosmos-sdk/blob/e6848d99b55a65d014375b295bdd7f9641aac95e/proto/cosmos/bank/v1beta1/tx.proto#L40
```
```proto
option (cosmos.msg.v1.signer) = "from_address";
```
## Scalar
The scalar type defines a way for clients to understand how to construct protobuf messages according to what is expected by the module and SDK.
```proto
(cosmos_proto.scalar) = "cosmos.AddressString"
```
Example of account address string scalar:
```proto reference
https://github.com/cosmos/cosmos-sdk/blob/e6848d99b55a65d014375b295bdd7f9641aac95e/proto/cosmos/bank/v1beta1/tx.proto#L46
```
Example of validator address string scalar:
```proto reference
https://github.com/cosmos/cosmos-sdk/blob/e8f28bf5db18b8d6b7e0d94b542ce4cf48fed9d6/proto/cosmos/distribution/v1beta1/query.proto#L87
```
Example of pubkey scalar:
```proto reference
https://github.com/cosmos/cosmos-sdk/blob/11068bfbcd44a7db8af63b6a8aa079b1718f6040/proto/cosmos/staking/v1beta1/tx.proto#L94
```
Example of Decimals scalar:
```proto reference
https://github.com/cosmos/cosmos-sdk/blob/e8f28bf5db18b8d6b7e0d94b542ce4cf48fed9d6/proto/cosmos/distribution/v1beta1/distribution.proto#L26
```
Example of Int scalar:
```proto reference
https://github.com/cosmos/cosmos-sdk/blob/e8f28bf5db18b8d6b7e0d94b542ce4cf48fed9d6/proto/cosmos/gov/v1/gov.proto#L137
```
There are a few options for what can be provided as a scalar: `cosmos.AddressString`, `cosmos.ValidatorAddressString`, `cosmos.ConsensusAddressString`, `cosmos.Int`, `cosmos.Dec`.
## Implements_Interface
The `implements_interface` annotation is used to provide information to client tooling like [telescope](https://github.com/cosmology-tech/telescope) on how to encode and decode protobuf messages.
```proto
option (cosmos_proto.implements_interface) = "cosmos.auth.v1beta1.AccountI";
```
## Method,Field,Message Added In
`method_added_in`, `field_added_in` and `message_added_in` are annotations that denote to clients that a method, field, or message was added in a later version. This is useful when new methods or fields are added in later versions, so that clients know what they can call.
The annotation should be worded as follows:
```proto
option (cosmos_proto.method_added_in) = "cosmos-sdk v0.50.1";
option (cosmos_proto.method_added_in) = "x/epochs v1.0.0";
option (cosmos_proto.method_added_in) = "simapp v24.0.0";
```
## Amino
The amino codec was removed in `v0.50+`; this means there is no longer a need to register the `legacyAminoCodec`. To replace the amino codec, Amino protobuf annotations are used to provide information to the amino codec on how to encode and decode protobuf messages.
:::note
Amino annotations are only used for backwards compatibility with amino. New modules are not required to use amino annotations.
:::
The below annotations are used to provide information to the amino codec on how to encode and decode protobuf messages in a backwards compatible manner.
### Name
Name specifies the amino name that shows up for the user, so they can see which message they are signing.
```proto
option (amino.name) = "cosmos-sdk/BaseAccount";
```
```proto reference
https://github.com/cosmos/cosmos-sdk/blob/e8f28bf5db18b8d6b7e0d94b542ce4cf48fed9d6/proto/cosmos/bank/v1beta1/tx.proto#L41
```
### Field_Name
Field name specifies the amino name that shows up for the user, so they can see which field they are signing.
```proto
uint64 height = 1 [(amino.field_name) = "public_key"];
```
```proto reference
https://github.com/cosmos/cosmos-sdk/blob/e8f28bf5db18b8d6b7e0d94b542ce4cf48fed9d6/proto/cosmos/distribution/v1beta1/distribution.proto#L166
```
### Dont_OmitEmpty
Dont omitempty specifies that the field should not be omitted when encoding to amino.
```proto
repeated cosmos.base.v1beta1.Coin amount = 3 [(amino.dont_omitempty) = true];
```
```proto reference
https://github.com/cosmos/cosmos-sdk/blob/e8f28bf5db18b8d6b7e0d94b542ce4cf48fed9d6/proto/cosmos/bank/v1beta1/bank.proto#L56
```
### Encoding
Encoding instructs the amino json marshaler how to encode certain fields that may differ from the standard encoding behaviour. The most common example of this is how `repeated cosmos.base.v1beta1.Coin` is encoded when using the amino json encoding format. The `legacy_coins` option tells the json marshaler [how to encode a null slice](https://github.com/cosmos/cosmos-sdk/blob/e8f28bf5db18b8d6b7e0d94b542ce4cf48fed9d6/x/tx/signing/aminojson/json_marshal.go#L65) of `cosmos.base.v1beta1.Coin`.
```proto
(amino.encoding) = "legacy_coins",
```
```proto reference
https://github.com/cosmos/cosmos-sdk/blob/e8f28bf5db18b8d6b7e0d94b542ce4cf48fed9d6/proto/cosmos/bank/v1beta1/genesis.proto#L23
```
Another example is a protobuf `bytes` that contains a valid JSON document.
The `inline_json` option tells the json marshaler to embed the JSON bytes into the wrapping document without escaping.
```proto
(amino.encoding) = "inline_json",
```
E.g. the bytes containing `{"foo":123}` in the `envelope` field would lead to the following JSON:
```json
{
"envelope": {
"foo": 123
}
}
```
If the bytes are not valid JSON, this leads to broken JSON documents. Thus a JSON validity check needs to be in place at some point in the process.


@ -1,627 +0,0 @@
# RFC 004: Account System Refactor
## Status
- Draft v2 (May 2023)
## Current Limitations
1. **Account Representation**: Limited by `google.protobuf.Any` encapsulation and basic authentication methods
2. **Interface Constraints**: Lacks support for advanced functionalities like vesting and complex auth systems
3. **Implementation Rigidity**: Poor differentiation between account types (e.g., `ModuleAccount`)
4. **Authorization System**: Basic `x/auth` module with limited scope beyond `x/bank` functionality
5. **Dependency Issues**: Cyclic dependencies between modules (e.g., `x/auth` → `x/bank` for vesting)
## Proposal
This proposal aims to transform the way accounts are managed within the Cosmos SDK by introducing significant changes to
their structure and functionality.
### Rethinking Account Representation and Business Logic
Instead of representing accounts as simple `google.protobuf.Any` structures stored in state with no business logic
attached, this proposal suggests a more sophisticated account representation that is closer to module entities.
In fact, accounts should be able to receive messages and process them in the same way modules do, and be capable of storing
state in an isolated (prefixed) portion of state belonging only to them, in the same way as modules do.
### Account Message Reception
We propose that accounts should be able to receive messages in the same way modules can, allowing them to manage their
own state modifications without relying on other modules. This change would enable more advanced account functionality, such as the
`VestingAccount` example, where the `x/bank` module previously needed to change the vesting state by casting the abstracted
account to `VestingAccount` and triggering the `TrackDelegation` call. Accounts are already capable of sending messages when
a state transition, originating from a transaction, is executed.
When accounts receive messages, they will be able to identify the sender of the message and decide how to process the
state transition, if at all.
### Consequences
These changes would have significant implications for the Cosmos SDK, resulting in a system of actors that are equal from
the runtime perspective. The runtime would only be responsible for propagating messages between actors and would not
manage the authorization system. Instead, actors would manage their own authorizations. For instance, there would be no
need for the `x/auth` module to manage minting or burning of coins permissions, as it would fall within the scope of the
`x/bank` module.
The key difference between accounts and modules would lie in the origin of the message (state transition). Accounts
(ExternallyOwnedAccount), which have credentials (e.g., a public/private key pairing), originate state transitions from
transactions. In contrast, module state transitions do not have authentication credentials backing them and can be
caused by two factors: either as a consequence of a state transition coming from a transaction or triggered by a scheduler
(e.g., the runtime's Begin/EndBlock).
By implementing these proposed changes, the Cosmos SDK will benefit from a more extensible, versatile, and efficient account
management system that is better suited to address the requirements of the Cosmos ecosystem.
#### Standardization
With `x/accounts` allowing a modular API, there becomes a need for standardization of accounts and of the interfaces wallets and other clients should expect to use. For this reason we will be using the [`CIP` repo](https://github.com/cosmos/cips) to standardize interfaces so that wallets know what to expect when interacting with accounts.
## Implementation
### Account Definition
We define the new `Account` type, which is what an account needs to implement to be treated as such.
An `Account` type is defined at APP level, so it cannot be dynamically loaded as the chain is running without upgrading the
node code, unless we create something like a `CosmWasmAccount` which is an account backed by an `x/wasm` contract.
```go
// Account is what the developer implements to define an account.
type Account[InitMsg proto.Message] interface {
// Init is the function that initialises an account instance of a given kind.
// InitMsg is used to initialise the initial state of an account.
Init(ctx *Context, msg InitMsg) error
// RegisterExecuteHandlers registers an account's execution messages.
RegisterExecuteHandlers(executeRouter *ExecuteRouter)
// RegisterQueryHandlers registers an account's query messages.
RegisterQueryHandlers(queryRouter *QueryRouter)
// RegisterMigrationHandlers registers an account's migration messages.
RegisterMigrationHandlers(migrationRouter *MigrationRouter)
}
```
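To make the shape of this interface concrete, here is a hedged sketch of a trivial account implementation. `Context`, `ExecuteRouter`, `QueryRouter` and `MigrationRouter` are the proposal's own (not yet implemented) types; `MsgInitCounter`, `MsgPoke`, `RegisterHandler` and the `getCount`/`setCount` helpers over the account's namespaced `KVStore` are illustrative stand-ins:
```go
// CounterAccount counts how many times it has been poked since Init.
type CounterAccount struct{}

func (CounterAccount) Init(ctx *Context, msg *MsgInitCounter) error {
	// Persist the initial counter value in the account's isolated store.
	return setCount(ctx, msg.InitialValue)
}

func (CounterAccount) RegisterExecuteHandlers(r *ExecuteRouter) {
	// RegisterHandler is a hypothetical typed helper over the router.
	RegisterHandler(r, func(ctx *Context, _ *MsgPoke) (*MsgPokeResponse, error) {
		count, err := getCount(ctx)
		if err != nil {
			return nil, err
		}
		if err := setCount(ctx, count+1); err != nil {
			return nil, err
		}
		return &MsgPokeResponse{Count: count + 1}, nil
	})
}

func (CounterAccount) RegisterQueryHandlers(*QueryRouter)         {}
func (CounterAccount) RegisterMigrationHandlers(*MigrationRouter) {}
```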
### The InternalAccount definition
The public `Account` interface implementation is then converted by the runtime into an `InternalAccount` implementation,
which contains all the information and business logic needed to operate the account.
```go
type Schema struct {
state StateSchema // represents the state of an account
init InitSchema // represents the init msg schema
exec ExecSchema // represents the multiple execution msg schemas, containing also responses
query QuerySchema // represents the multiple query msg schemas, containing also responses
migrate *MigrateSchema // represents the multiple migrate msg schemas, containing also responses, it's optional
}
type InternalAccount struct {
init func(ctx *Context, msg proto.Message) (*InitResponse, error)
execute func(ctx *Context, msg proto.Message) (*ExecuteResponse, error)
query func(ctx *Context, msg proto.Message) (proto.Message, error)
schema func() *Schema
migrate func(ctx *Context, msg proto.Message) (*MigrateResponse, error)
}
```
This is an internal view of the account as intended by the system. It is not meant to be what developers implement. An
example implementation of the `InternalAccount` type can be found in [this](https://github.com/testinginprod/accounts-poc/blob/main/examples/recover/recover.go)
example of an account whose credentials can be recovered. In fact, even if the `Internal` implementation is untyped (with
respect to `proto.Message`), the concrete implementation is fully typed.
During any of the execution methods of `InternalAccount`, `schema` excluded, the account is given a `Context` which provides:
- A namespaced `KVStore` for the account, which isolates the account state from others (NOTE: no `store keys` needed,
the account address serves as `store key`).
- Information regarding itself (its address)
- Information regarding the sender.
- ...
#### Init
Init defines the entrypoint that allows for a new account instance of a given kind to be initialised.
The account is passed some opaque protobuf message which is then interpreted and contains the instructions that
constitute the initial state of an account once it is deployed.
An `Account` code can be deployed multiple times through the `Init` function, similar to how a `CosmWasm` contract code
can be deployed (Instantiated) multiple times.
#### Execute
Execute defines the entrypoint that allows an `Account` to process a state transition. The account can then decide how to
process the state transition based on the message provided and the sender of the transition.
#### Query
Query defines a read-only entrypoint that provides a stable interface that links an account with its state. The reason for
which `Query` is still being preferred as an addition to raw state reflection is to:
- Provide a stable interface for querying (state can be optimised and change more frequently than a query)
- Provide a way to define an account `Interface` with respect to its `Read/Write` paths.
- Provide a way to query information that cannot be processed from raw state reflection, ex: compute information from lazy
state that has not been yet concretely processed (eg: balances with respect to lazy inputs/outputs)
#### Schema
Schema provides the definition of an account from an `API` perspective, and it's the only thing that should be taken into account
when interacting with an account from another account or module. For example: an account is an `authz-interface` account if
it has the following message in its execution messages: `MsgProxyStateTransition{ state_transition: google.protobuf.Any }`.
### Migrate
Migrate defines the entrypoint that allows an `Account` to migrate its state from a previous version to a new one. Migrations
can be initiated only by the account itself: concretely, this means that the migrate action sender can only be the account address
itself. If the account wants to allow another address to migrate it on its behalf, it could create an execution message
that makes the account migrate itself.
### x/accounts module
In order to create accounts we define a new module `x/accounts`. Note that `x/accounts` deploys accounts with no authentication
credentials attached, which means no action of an account can be incepted from a TX; we will later explore how the
`x/authn` module uses `x/accounts` to deploy authenticated accounts.
This also has another important implication: account addresses are now fully decoupled from the authentication mechanism,
which in turn makes off-chain operations a little more complex, as the chain becomes the real link between account identifier
and credentials.
We could also introduce a way to deterministically compute the account address.
Note, from the transaction point of view, the `init_message` and `execute_message` are opaque `google.protobuf.Any`.
The module protobuf definitions for `x/accounts` are the following:
```protobuf
// Msg defines the Msg service.
service Msg {
rpc Deploy(MsgDeploy) returns (MsgDeployResponse);
rpc Execute(MsgExecute) returns (MsgExecuteResponse);
rpc Migrate(MsgMigrate) returns (MsgMigrateResponse);
}
message MsgDeploy {
string sender = 1;
string kind = 2;
google.protobuf.Any init_message = 3;
repeated google.protobuf.Any authorize_messages = 4 [(gogoproto.nullable) = false];
}
message MsgDeployResponse {
string address = 1;
uint64 id = 2;
google.protobuf.Any data = 3;
}
message MsgExecute {
string sender = 1;
string address = 2;
google.protobuf.Any message = 3;
repeated google.protobuf.Any authorize_messages = 4 [(gogoproto.nullable) = false];
}
message MsgExecuteResponse {
google.protobuf.Any data = 1;
}
message MsgMigrate {
string sender = 1;
string new_account_kind = 2;
google.protobuf.Any migrate_message = 3;
}
message MsgMigrateResponse {
google.protobuf.Any data = 1;
}
```
#### MsgDeploy
Deploys a new instance of the given account `kind` with initial settings represented by the `init_message`, which is a `google.protobuf.Any`.
Of course the `init_message` can be empty. A response is returned containing the account ID and humanised address, alongside any response data
that the account instantiation might produce.
#### Address derivation
In order to decouple public keys from account addresses, we introduce a new address derivation mechanism which is
#### MsgExecute
Sends a `StateTransition` execution request, where the state transition is represented by the `message`, which is a `google.protobuf.Any`.
The account can then decide whether to process it based on the `sender`.
#### MsgMigrate
Migrates an account to a new version of itself; the new version is represented by the `new_account_kind`. The state transition
can only be incepted by the account itself, which means that the `sender` must be the account address itself. During the migration
the account's current state is given to the new version of the account, which then executes the migration logic using the `migrate_message`;
it might change state or not, it's up to the account to decide. The response contains possible data that the account might produce
after the migration.
#### Authorize Messages
The `Deploy` and `Execute` messages have a field in common called `authorize_messages`; these are messages that the account
can execute on behalf of the sender. For example, in case an account is expecting some funds to be sent from the sender,
the sender can attach a `MsgSend` that the account can execute on the sender's behalf. These authorizations are short-lived:
they live only for the duration of the `Deploy` or `Execute` message execution, or until they are consumed.
An alternative would have been to add a `funds` field, as happens in CosmWasm, which guarantees the called contract that
the funds are available and sent in the context of the message execution. This would have been a simpler approach, but it would
have been limited to the context of `MsgSend` only, where the asset is `sdk.Coins`. The proposed generic way, instead, allows
the account to execute any message on behalf of the sender, which is more flexible; it could include NFT send execution, or
more complex things like `MsgMultiSend` or `MsgDelegate`, etc.
### Further discussion
#### Sub-accounts
We could provide a way to link accounts to other accounts. Maybe during deployment the sender could decide to link the
newly created account to its own, although there might be use-cases for which the deployer is different from the account
that needs to be linked; in this case a handshake protocol on linking would need to be defined.
#### Predictable address creation
We need to provide a way to create an account with a predictable address; this might serve a lot of purposes, like accounts
wanting to generate an address that:
- nobody else can claim besides the account used to generate the new account
- is predictable
For example:
```protobuf
message MsgDeployPredictable {
string sender = 1;
uint32 nonce = 2;
...
}
```
And then the address becomes `bechify(concat(sender, nonce))`
`x/accounts` would still use the monotonically increasing sequence as account number.
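A minimal sketch of such a derivation, assuming SHA-256 over the concatenation and the SDK's `bech32.ConvertAndEncode` helper (the concrete hash function and HRP are not specified by this RFC):
```go
import (
	"crypto/sha256"
	"encoding/binary"

	"github.com/cosmos/cosmos-sdk/types/bech32"
)

// predictableAddress computes bechify(concat(sender, nonce)).
func predictableAddress(hrp string, sender []byte, nonce uint32) (string, error) {
	buf := make([]byte, len(sender)+4)
	copy(buf, sender)
	binary.BigEndian.PutUint32(buf[len(sender):], nonce)
	sum := sha256.Sum256(buf)
	return bech32.ConvertAndEncode(hrp, sum[:])
}
```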
#### Joining Multiple Accounts
As developers are building new kinds of accounts, it becomes necessary to provide a default way to combine the
functionalities of different account types. This allows developers to avoid duplicating code and enables end-users to
create or migrate to accounts with multiple functionalities without requiring custom development.
To address this need, we propose the inclusion of a default account type called "MultiAccount". The MultiAccount type is
designed to merge the functionalities of other accounts by combining their execution, query, and migration APIs.
The account joining process would only fail in the case of API (intended as non-state Schema APIs) conflicts, ensuring
compatibility and consistency.
With the introduction of the MultiAccount type, users would have the option to either migrate their existing accounts to
a MultiAccount type or extend an existing MultiAccount with newer APIs. This flexibility empowers users to leverage
various account functionalities without compromising compatibility or resorting to manual code duplication.
The MultiAccount type serves as a standardized solution for combining different account functionalities within the
cosmos-sdk ecosystem. By adopting this approach, developers can streamline the development process and users can benefit
from a modular and extensible account system.
# ADR 071: Cryptography v2 - Multi-curve support
## Change log
- May 7th 2024: Initial Draft (Zondax AG: @raynaudoe @juliantoledano @jleni @educlerici-zondax @lucaslopezf)
- June 13th 2024: Add CometBFT implementation proposal (Zondax AG: @raynaudoe @juliantoledano @jleni @educlerici-zondax @lucaslopezf)
- July 2nd 2024: Split ADR proposal, add link to ADR in cosmos/crypto (Zondax AG: @raynaudoe @juliantoledano @jleni @educlerici-zondax @lucaslopezf)
## Status
DRAFT
## Abstract
This ADR proposes the refactoring of the existing `Keyring` and `cosmos-sdk/crypto` code to implement [ADR-001-CryptoProviders](https://github.com/cosmos/crypto/blob/main/docs/architecture/adr-001-crypto-provider.md).
For in-depth details of the `CryptoProviders` and their design, please refer to the ADR mentioned above.
## Introduction
The introduction of multi-curve support in the cosmos-sdk cryptographic package offers significant advantages. By not being restricted to a single cryptographic curve, developers can choose the most appropriate curve based on security, performance, and compatibility requirements. This flexibility enhances the application's ability to adapt to evolving security standards and optimizes performance for specific use cases, helping to future-proof the SDK's cryptographic capabilities.
The enhancements in this proposal not only render the ["Keyring ADR"](https://github.com/cosmos/cosmos-sdk/issues/14940) obsolete, but also encompass its key aspects, replacing it with a more flexible and comprehensive approach. Furthermore, the gRPC service proposed in the mentioned ADR can be easily implemented as a specialized `CryptoProvider`.
### Glossary
1. **Interface**: In the context of this document, "interface" refers to Go's interface.
2. **Module**: In this document, "module" refers to a Go module.
3. **Package**: In the context of Go, a "package" refers to a unit of code organization.
## Context
In order to fully understand the need for changes and the proposed improvements, it's crucial to consider the current state of affairs:
- The Cosmos SDK currently lacks a comprehensive ADR for the cryptographic package.
- If a blockchain project requires a cryptographic curve that is not supported by the current SDK, the most likely scenario is that they will need to fork the SDK repository and make modifications. These modifications could potentially make the fork incompatible with future updates from the upstream SDK, complicating maintenance and integration.
- Type leakage of specific crypto data types exposes backward compatibility and extensibility challenges.
- The demand for a more flexible and extensible approach to cryptography and address management is high.
- Architectural changes are necessary to resolve many of the currently open issues related to support for new curves.
- There is a current trend towards modularity in the Interchain stack (e.g., runtime modules).
- Security implications are a critical consideration during the redesign work.
## Objectives
The key objectives for this proposal are:
- Leverage `CryptoProviders`: Utilize them as APIs for cryptographic tools, ensuring modularity, flexibility, and ease of integration.
- Developer-Centric Approach: Prioritize clear, intuitive interfaces and best-practice design principles.
- Quality Assurance: Improve testing methodologies and enhance test coverage to ensure the robustness and reliability of the module.
## Technical Goals
- New Keyring: Design a new `Keyring` interface with a modular backend injection system to support hardware devices and cloud-based HSMs. This feature is optional and tied to complexity; if it proves too complex, it will be deferred to a future release as an enhancement.
## Proposed architecture
### Components
The main components to be used will be the same as those found in the [ADR-001](https://github.com/cosmos/crypto/blob/main/docs/architecture/adr-001-crypto-provider.md#components).
#### Storage and persistence
The storage and persistence layer is tasked with storing `CryptoProvider`s. Specifically, this layer must:
- Securely store the crypto provider's associated private key (only if stored locally, otherwise a reference to the private key will be stored instead).
- Store the [`ProviderMetadata`](https://github.com/cosmos/crypto/blob/main/docs/architecture/adr-001-crypto-provider.md#metadata) struct which contains the data that distinguishes that provider.
The purpose of this layer is to ensure that upon retrieval of the persisted data, we can access the provider's type, version, and specific configuration (which varies based on the provider type). This information will subsequently be utilized to initialize the appropriate factory, as detailed in the following section on the factory pattern.
The storage proposal involves using a modified version of the [Record](https://github.com/cosmos/cosmos-sdk/blob/main/proto/cosmos/crypto/keyring/v1/record.proto) struct, which is already defined in **Keyring/v1**. Additionally, we propose utilizing the existing keyring backends (keychain, filesystem, memory, etc.) to store these `Record`s in the same manner as the current **Keyring/v1**.
_Note: This approach will facilitate a smoother migration path from the current Keyring/v1 to the proposed architecture._
Below is the proposed protobuf message to be included in the modified `Record.proto` file.
##### Protobuf message structure
The [record.proto](https://github.com/cosmos/cosmos-sdk/blob/main/proto/cosmos/crypto/keyring/v1/record.proto) file will be modified to include the `CryptoProvider` message as an optional field as follows.
```protobuf
// record.proto
message Record {
string name = 1;
google.protobuf.Any pub_key = 2;
oneof item {
Local local = 3;
Ledger ledger = 4;
Multi multi = 5;
Offline offline = 6;
CryptoProvider crypto_provider = 7; // <- New
}
message Local {
google.protobuf.Any priv_key = 1;
}
message Ledger {
hd.v1.BIP44Params path = 1;
}
message Multi {}
message Offline {}
}
```
##### Creating and loading a `CryptoProvider`
For creating providers, we propose a _factory pattern_ and a _registry_ for these builders. Examples of these
patterns can be found [here](https://github.com/cosmos/crypto/blob/main/docs/architecture/adr-001-crypto-provider.md#illustrative-code-snippets)
##### Keyring
The new `Keyring` interface will serve as a central hub for managing and fetching `CryptoProviders`. To ensure a smoother migration path, the new Keyring will be backward compatible with the previous version. Since this will be the main API from which applications will obtain their `CryptoProvider` instances, the proposal is to extend the Keyring interface to include the methods:
```go
type KeyringV2 interface {
// methods from Keyring/v1
// ListCryptoProviders returns a list of all the stored CryptoProvider metadata.
ListCryptoProviders() ([]ProviderMetadata, error)
// GetCryptoProvider retrieves a specific CryptoProvider by its id.
GetCryptoProvider(id string) (CryptoProvider, error)
}
```
_Note_: Methods to obtain a provider from a public key or other means that make it easier to load the desired provider can be added.
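A hedged usage sketch of the extended interface; `GetSigner` and `Sign` mirror the component shapes sketched in ADR-001 and are illustrative rather than final APIs:
```go
// signWithProvider fetches a stored provider by id and signs msg with it.
func signWithProvider(kr KeyringV2, id string, msg []byte) ([]byte, error) {
	provider, err := kr.GetCryptoProvider(id)
	if err != nil {
		return nil, err
	}
	// GetSigner/Sign are assumed accessors from the ADR-001 design; their
	// final signatures may differ at implementation time.
	signer, err := provider.GetSigner()
	if err != nil {
		return nil, err
	}
	return signer.Sign(msg)
}
```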
##### Special use case: remote signers
It's important to note that the `CryptoProvider` interface is versatile enough to be implemented as a remote signer. This capability allows for the integration of remote cryptographic operations, which can be particularly useful in distributed or cloud-based environments where local cryptographic resources are limited or need to be managed centrally.
## Alternatives
It is important to note that all the code presented in this document is not in its final form and could be subject to changes at the time of implementation. The examples and implementations discussed should be interpreted as alternatives, providing a conceptual framework rather than definitive solutions. This flexibility allows for adjustments based on further insights, technical evaluations, or changing requirements as development progresses.
## Decision
We will:
- Leverage crypto providers
- Refactor the module structure as described above.
- Define types and interfaces as the code attached.
- Refactor existing code into new structure and interfaces.
- Implement Unit Tests to ensure no backward compatibility issues.
## Consequences
### Impact on the SDK codebase
We can divide the impact of this ADR into two main categories: state machine code and client related code.
#### Client
The major impact will be on the client side, where the current `Keyring` interface will be replaced by the new `KeyringV2` interface. At first, the impact will be low since `CryptoProvider` is an optional field in the `Record` message, so there's no mandatory requirement for migrating to this new concept right away. This allows a progressive transition where the risks of breaking changes or regressions are minimized.
#### State Machine
The impact on the state machine code will be minimal; the modules affected (at the time of writing this ADR)
are the `x/accounts` module, specifically the `Authenticate` function, and the `x/auth/ante` module. This function will need to be adapted to use a `CryptoProvider` service to make use of the `Verifier` instance.
Worth mentioning that there's also the alternative of using `Verifier` instances in a standalone fashion (see note below).
The specific way to adapt these modules will be deeply analyzed and decided at implementation time of this ADR.
_Note_: All cryptographic tools (hashers, verifiers, signers, etc.) will continue to be available as standalone packages that can be imported and utilized directly without the need for a `CryptoProvider` instance. However, the `CryptoProvider` is the recommended method for using these tools as it offers a more secure way to handle sensitive data, enhanced modularity, and the ability to store configurations and metadata within the `CryptoProvider` definition.
### Backwards Compatibility
The proposed migration path is similar to what the cosmos-sdk has done in the past. To ensure a smooth transition, the following steps will be taken:
Once ADR-001 is implemented with a stable release:
- Deprecate the old crypto package. The old crypto package will still be usable, but it will be marked as deprecated and users can opt to use the new package.
- Migrate the codebase to use the new cosmos/crypto package and remove the old crypto one.
### Positive
- Single source of truth
- Easier to use interfaces
- Easier to extend
- Unit test for each crypto package
- Greater maintainability
- Incentivize addition of implementations instead of forks
- Decoupling behavior from implementation
- Sanitization of code
### Negative
- It will involve an effort to adapt existing code.
- It will require attention to detail and auditing.
### Neutral
- It will involve extensive testing.
## Test Cases
- The code will be unit tested to ensure high code coverage.
- There should be integration tests around Keyring and CryptoProviders.
# ADR-71 Bank V2
## Status
DRAFT
## Changelog
- 2024-05-08: Initial Draft (@samricotta, @julienrbrt)
## Abstract
The primary objective of refactoring the bank module is to simplify and enhance the functionality of the Cosmos SDK. Over time the bank module has been burdened with numerous responsibilities including transaction handling, account restrictions, delegation counting, and the minting and burning of coins.
In addition to the above, the bank module is currently too rigid and handles too many tasks, so this proposal aims to streamline the module by focusing on core functions `Send`, `Mint`, and `Burn`.
Currently, the module is split across different keepers with scattered and duplicated functionality (four send functions, for instance).
Additionally, the integration of the token factory into the bank module allows for standardization, and better integration within the core modules.
This rewrite will reduce complexity and enhance the efficiency and UX of the bank module.
## Context
The current implementation of the bank module is characterised by its handling of a broad array of functions, leading to significant complexity in using and extending the bank module.
These issues have underscored the need for a refactoring strategy that simplifies the module's architecture and focuses on its most essential operations.
Additionally, there is an overlap in functionality with a Token Factory module, which could be integrated to streamline operations.
## Decision
**Permission Tightening**: Access to the module can be restricted to selected denominations only, ensuring that it operates within designated boundaries and does not exceed its intended scope. Currently, the permissions allow all denoms, so this should be changed. Send restrictions functionality will be maintained.
**Simplification of Logic**: The bank module will focus on core functionalities `Send`, `Mint`, and `Burn`. This refinement aims to streamline the architecture, enhancing both maintainability and performance.
**Integration of Token Factory**: The Token Factory will be merged into the bank module. This consolidation of related functionalities aims to reduce redundancy and enhance coherence within the system. Migrations functions will be provided for migrating from Osmosis' Token Factory module to bank/v2.
**Legacy Support**: A legacy wrapper will be implemented to ensure compatibility with about 90% of existing functions. This measure will facilitate a smooth transition while keeping older systems functional.
**Denom Implementation**: An asset interface will be added to standardise interactions such as transfers, balance inquiries, minting, and burning across different tokens. This will allow the bank module to support arbitrary asset types, enabling developers to implement custom, ERC20-like denominations.
For example, currently if a team would like to extend the transfer method, the changes would apply universally, affecting all denoms. The proposed asset interface allows teams to customise or extend the transfer method specifically for their own tokens without impacting others.
These improvements are expected to enhance the flexibility of the bank module, allowing for the creation of custom tokens similar to ERC20 standards and assets backed by CosmWasm (CW) contracts. The integration efforts will also aim to unify CW20 with bank coins across the Cosmos chains.
Example of denom interface:
```go
type AssetInterface interface {
Transfer(ctx sdk.Context, from sdk.AccAddress, to sdk.AccAddress, amount sdk.Coin) error
Mint(ctx sdk.Context, to sdk.AccAddress, amount sdk.Coin) error
Burn(ctx sdk.Context, from sdk.AccAddress, amount sdk.Coin) error
QueryBalance(ctx sdk.Context, account sdk.AccAddress) (sdk.Coin, error)
}
```
Overview of flow:
1. Alice initiates a transfer by entering Bob's address and the amount (100 ATOM)
2. The Bank module verifies that the ATOM token implements the `AssetInterface` by querying the `ATOM_Denom_Account`, which is an `x/account` denom account.
3. The Bank module executes the transfer by subtracting 100 ATOM from Alice's balance and adding 100 ATOM to Bob's balance.
4. The Bank module calls the Transfer method on the `ATOM_Denom_Account`. The Transfer method, defined in the `AssetInterface`, handles the logic to subtract 100 ATOM from Alice's balance and add 100 ATOM to Bob's balance.
5. The Bank module updates the chain and returns the new balances.
6. Both Alice and Bob successfully receive the updated balances.
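To sketch how the flow above might look in bank/v2 code, here is a hedged example; the `resolveAsset` lookup against the denom's `x/account` is hypothetical:
```go
// Send routes a transfer through the denom's AssetInterface implementation.
func (k Keeper) Send(ctx sdk.Context, from, to sdk.AccAddress, amount sdk.Coin) error {
	// resolveAsset is a hypothetical helper that returns the denom account's
	// AssetInterface (e.g. the ATOM_Denom_Account), or a default
	// implementation when the denom has not registered a custom one.
	asset, err := k.resolveAsset(ctx, amount.Denom)
	if err != nil {
		return err
	}
	return asset.Transfer(ctx, from, to, amount)
}
```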
## Migration Plans
Bank is a widely used module, so getting a v2 needs to be thought through thoroughly. In order to not force all dependencies to immediately migrate to bank/v2, the same _upgrading_ path will be taken as for the `gov` module.
This means `cosmossdk.io/bank` will stay one module and there won't be a new `cosmossdk.io/bank/v2` go module. Instead the bank protos will be versioned from `v1beta1` (current bank) to `v2`.
Bank `v1beta1` endpoints will use the new bank v2 implementation for maximum backward compatibility.
The bank `v1beta1` keepers will be deprecated and potentially eventually removed, but the `v1beta1` proto and message definitions will remain.
Additionally, as bank plans to integrate token factory, migrations functions will be provided to migrate from Osmosis token factory implementation (most widely used implementation) to the new bank/v2 token factory.
## Consequences
### Positive
- Simplified interaction with bank APIs
- Backward compatible changes (no contracts or apis broken)
- Optional migration (note: bank `v1beta1` won't get any new feature after bank `v2` release)
### Neutral
- Asset implementation not available cross-chain (an IBC-ed custom asset should possibly fall back to the default implementation)
- Many assets may slow down bank balances requests
### Negative
- Temporary duplicate functionality while bank `v1beta1` and `v2` live alongside each other
- Difficulty of ever completely removing bank `v1beta1`
## References
- Current bank module implementation: https://github.com/cosmos/cosmos-sdk/blob/v0.50.6/x/bank/keeper/keeper.go#L22-L53
- Osmosis token factory: https://github.com/osmosis-labs/osmosis/tree/v25.0.0/x/tokenfactory/keeper


@ -1,685 +0,0 @@
# Cosmos SDK Core Components
## Overview
The Cosmos SDK is a framework for building secure blockchain applications on CometBFT. It provides:
- ABCI implementation in Go
- Multi-store persistence layer
- Transaction routing system
## Transaction Flow
1. CometBFT consensus delivers transaction bytes
2. SDK decodes transactions and extracts messages
3. Messages routed to appropriate modules
4. State changes committed to stores
```mermaid
graph TD
A[CometBFT] -->|Tx Bytes| B[SDK Decode]
B -->|Messages| C[Module Router]
C -->|State Changes| D[Multi-store]
```
## `baseapp`
`baseapp` is the boilerplate implementation of a Cosmos SDK application. It comes with an implementation of the ABCI to handle the connection with the underlying consensus engine. Typically, a Cosmos SDK application extends `baseapp` by embedding it in [`app.go`](../beginner/00-app-anatomy.md#core-application-file).
Here is an example of this from `simapp`, the Cosmos SDK demonstration app:
```go reference
https://github.com/cosmos/cosmos-sdk/blob/v0.52.0-beta.1/simapp/app.go#L145-L186
```
The goal of `baseapp` is to provide a secure interface between the store and the extensible state machine while defining as little about the state machine as possible (staying true to the ABCI).
For more on `baseapp`, please click [here](../advanced/00-baseapp.md).
## Multistore
The Cosmos SDK provides a [`multistore`](../advanced/04-store.md#multistore) for persisting state. The multistore allows developers to declare any number of [`KVStores`](../advanced/04-store.md#base-layer-kvstores). These `KVStores` only accept the `[]byte` type as value and therefore any custom structure needs to be marshalled using [a codec](../advanced/05-encoding.md) before being stored.
The multistore abstraction is used to divide the state into distinct compartments, each managed by its own module. For more on the multistore, click [here](../advanced/04-store.md#multistore).
## Modules
The power of the Cosmos SDK lies in its modularity. Cosmos SDK applications are built by aggregating a collection of interoperable modules. Each module defines a subset of the state and contains its own message/transaction processor, while the Cosmos SDK is responsible for routing each message to its respective module.
Here is a simplified view of how a transaction is processed by the application of each full-node when it is received in a valid block:
```mermaid
flowchart TD
A[Transaction relayed from the full-node's CometBFT engine to the node's application via DeliverTx] --> B[APPLICATION]
B -->|"Using baseapp's methods: Decode the Tx, extract and route the message(s)"| C[Message routed to the correct module to be processed]
C --> D1[AUTH MODULE]
C --> D2[BANK MODULE]
C --> D3[STAKING MODULE]
C --> D4[GOV MODULE]
D1 -->|Handle message, Update state| E["Return result to CometBFT (0=Ok, 1=Err)"]
D2 -->|Handle message, Update state| E["Return result to CometBFT (0=Ok, 1=Err)"]
D3 -->|Handle message, Update state| E["Return result to CometBFT (0=Ok, 1=Err)"]
D4 -->|Handle message, Update state| E["Return result to CometBFT (0=Ok, 1=Err)"]
```
Each module can be seen as a little state-machine. Developers need to define the subset of the state handled by the module, as well as custom message types that modify the state (_Note:_ `messages` are extracted from `transactions` by `baseapp`). In general, each module declares its own `KVStore` in the `multistore` to persist the subset of the state it defines. Most developers will need to access other 3rd party modules when building their own modules. Given that the Cosmos SDK is an open framework, some of the modules may be malicious, which means there is a need for security principles to reason about inter-module interactions. These principles are based on [object-capabilities](../advanced/10-ocap.md). In practice, this means that instead of having each module keep an access control list for other modules, each module implements special objects called `keepers` that can be passed to other modules to grant a pre-defined set of capabilities.
Cosmos SDK modules are defined in the `x/` folder of the Cosmos SDK. Some core modules include:
- `x/auth`: Used to manage accounts and signatures.
- `x/bank`: Used to enable tokens and token transfers.
- `x/staking` + `x/slashing`: Used to build Proof-of-Stake blockchains.
In addition to the already existing modules in `x/`, which anyone can use in their app, the Cosmos SDK lets you build your own custom modules. You can check an [example of that in the tutorial](https://tutorials.cosmos.network/).

# Keepers
:::note Synopsis
`Keeper`s refer to a Cosmos SDK abstraction whose role is to manage access to the subset of the state defined by various modules. `Keeper`s are module-specific, i.e. the subset of state defined by a module can only be accessed by a `keeper` defined in said module. If a module needs to access the subset of state defined by another module, a reference to the second module's internal `keeper` needs to be passed to the first one. This is done in `app.go` during the instantiation of module keepers.
:::
:::note Pre-requisite Readings
- [Introduction to Cosmos SDK Modules](./00-intro.md)
:::
## Motivation
The Cosmos SDK is a framework that makes it easy for developers to build complex decentralized applications from scratch, mainly by composing modules together. As the ecosystem of open-source modules for the Cosmos SDK expands, it will become increasingly likely that some of these modules contain vulnerabilities, as a result of the negligence or malice of their developer.
The Cosmos SDK adopts an [object-capabilities-based approach](https://docs.cosmos.network/main/learn/advanced/ocap#ocaps-in-practice) to help developers better protect their application from unwanted inter-module interactions, and `keeper`s are at the core of this approach. A `keeper` can be considered quite literally to be the gatekeeper of a module's store(s). Each store (typically an [`IAVL` Store](../../learn/advanced/04-store.md#iavl-store)) defined within a module comes with a `storeKey`, which grants unlimited access to it. The module's `keeper` holds this `storeKey` (which should otherwise remain unexposed), and defines [methods](#implementing-methods) for reading and writing to the store(s).
The core idea behind the object-capabilities approach is to only reveal what is necessary to get the work done. In practice, this means that instead of handling permissions of modules through access-control lists, module `keeper`s are passed a reference to the specific instance of the other modules' `keeper`s that they need to access (this is done in the [application's constructor function](../../learn/beginner/00-app-anatomy.md#constructor-function)). As a consequence, a module can only interact with the subset of state defined in another module via the methods exposed by the instance of the other module's `keeper`. This is a great way for developers to control the interactions that their own module can have with modules developed by external developers.
## Type Definition
`keeper`s are generally implemented in a `/keeper/keeper.go` file located in the module's folder. By convention, the type `keeper` of a module is simply named `Keeper` and usually has the following structure:
```go
type Keeper struct {
// External keepers, if any
// Store key(s)
// codec
// authority
}
```
For example, here is the type definition of the `keeper` from the `staking` module:
```go reference
https://github.com/cosmos/cosmos-sdk/blob/v0.52.0-beta.1/x/staking/keeper/keeper.go#L54-L115
```
Let us go through the different parameters (a minimal sketch combining them follows the list):
- An expected `keeper` is a `keeper` external to a module that is required by the internal `keeper` of said module. External `keeper`s are listed in the internal `keeper`'s type definition as interfaces. These interfaces are themselves defined in an `expected_keepers.go` file in the root of the module's folder. In this context, interfaces are used to reduce the number of dependencies, as well as to facilitate the maintenance of the module itself.
- `KVStoreService`s grant access to the store(s) of the [multistore](../../learn/advanced/04-store.md) managed by the module. They should always remain unexposed to external modules.
- `cdc` is the [codec](../../learn/advanced/05-encoding.md) used to marshal and unmarshal structs to/from `[]byte`. The `cdc` can be any of `codec.BinaryCodec`, `codec.JSONCodec` or `codec.Codec` based on your requirements. It can be either a proto or amino codec, as long as it implements these interfaces.
- The authority listed is a module account or user account that has the right to change module level parameters. Previously this was handled by the param module, which has been deprecated.
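For illustration, here is a minimal sketch of such a type definition; the `BankKeeper` interface and the exact field set are assumptions, not a prescribed layout:
```go
package keeper

import (
	"cosmossdk.io/core/store"

	"github.com/cosmos/cosmos-sdk/codec"
)

// BankKeeper is an expected keeper: only the methods this module actually
// needs would be declared here, normally in types/expected_keepers.go.
type BankKeeper interface {
	// e.g. SendCoins, SpendableCoins, ... (illustrative)
}

type Keeper struct {
	cdc          codec.BinaryCodec    // codec for (un)marshalling state
	storeService store.KVStoreService // access to the module's store; never exposed externally
	authority    string               // account allowed to update module parameters

	bankKeeper BankKeeper // external keeper, wired up in app.go
}

func NewKeeper(cdc codec.BinaryCodec, storeService store.KVStoreService, bk BankKeeper, authority string) Keeper {
	return Keeper{cdc: cdc, storeService: storeService, bankKeeper: bk, authority: authority}
}
```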
Of course, it is possible to define different types of internal `keeper`s for the same module (e.g. a read-only `keeper`). Each type of `keeper` comes with its own constructor function, which is called from the [application's constructor function](../../learn/beginner/00-app-anatomy.md). This is where `keeper`s are instantiated, and where developers make sure to pass correct instances of modules' `keeper`s to other modules that require them.
## Implementing Methods
`Keeper`s primarily expose methods for business logic, as validity checks should have already been performed by the [`Msg` server](./03-msg-services.md) when `keeper`s' methods are called.
<!-- markdown-link-check-disable -->
It is recommended to manage state via [Collections](../packages/collections).
<!-- The above link is created via the script to generate docs -->
## State Management
In the Cosmos SDK, it is crucial to be methodical and selective when managing state within a module, as improper state management can lead to inefficiency, security risks, and scalability issues. Not all data belongs in the on-chain state; it's important to store only essential blockchain data that needs to be verified by consensus. Storing unnecessary information, especially client-side data, can bloat the state and slow down performance. Instead, developers should focus on using an off-chain database to handle supplementary data, extending the API as needed. This approach minimizes on-chain complexity, optimizes resource usage, and keeps the blockchain state lean and efficient, ensuring scalability and smooth operations.
The Cosmos SDK leverages Protocol Buffers (protobuf) for efficient state management, providing a well-structured, binary encoding format that ensures compatibility and performance across different modules. The SDK's recommended approach for managing state is through the [collections package](../packages/02-collections.md), which simplifies state handling by offering predefined data structures like maps and indexed sets, reducing the complexity of managing raw state data. While users can opt for custom encoding schemes if they need more flexibility or have specialized requirements, they should be aware that such custom implementations may not integrate seamlessly with indexers that decode state data on the fly. This could lead to challenges in data retrieval, querying, and interoperability, making protobuf a safer and more future-proof choice for most use cases.
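As a minimal sketch of the collections approach (the prefix value and the `Counters` map are illustrative, not part of any real module):
```go
package keeper

import (
	"context"
	"errors"

	"cosmossdk.io/collections"
	"cosmossdk.io/core/store"
)

type Keeper struct {
	Schema   collections.Schema
	Counters collections.Map[string, uint64] // owner address -> counter
}

func NewKeeper(storeService store.KVStoreService) Keeper {
	sb := collections.NewSchemaBuilder(storeService)
	k := Keeper{
		Counters: collections.NewMap(sb, collections.NewPrefix(0), "counters",
			collections.StringKey, collections.Uint64Value),
	}
	schema, err := sb.Build()
	if err != nil {
		panic(err)
	}
	k.Schema = schema
	return k
}

// Increment reads, bumps, and writes a counter; encoding to/from []byte is
// handled by the key and value codecs declared above.
func (k Keeper) Increment(ctx context.Context, owner string) error {
	v, err := k.Counters.Get(ctx, owner)
	if err != nil && !errors.Is(err, collections.ErrNotFound) {
		return err
	}
	return k.Counters.Set(ctx, owner, v+1)
}
```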
# Folder Structure
:::note Synopsis
This document outlines the structure of Cosmos SDK modules. These ideas are meant to be applied as suggestions. Application developers are encouraged to improve upon and contribute to module structure and development design.
The required interface for a module is located in `module.go`. Everything beyond this is a suggestion.
:::
## Structure
A typical Cosmos SDK module can be structured as follows:
```shell
proto
└── {project_name}
    └── {module_name}
        └── {proto_version}
            ├── {module_name}.proto
            ├── genesis.proto
            ├── query.proto
            └── tx.proto
```
- `{module_name}.proto`: The module's common message type definitions.
- `genesis.proto`: The module's message type definitions related to genesis state.
- `query.proto`: The module's Query service and related message type definitions.
- `tx.proto`: The module's Msg service and related message type definitions.
```shell
x/{module_name}
├── client
│   ├── cli
│   │   ├── query.go
│   │   └── tx.go
│   └── testutil
│       ├── cli_test.go
│       └── suite.go
├── exported
│   └── exported.go
├── keeper
│   ├── genesis.go
│   ├── grpc_query.go
│   ├── hooks.go
│   ├── invariants.go
│   ├── keeper.go
│   ├── keys.go
│   ├── msg_server.go
│   └── querier.go
├── simulation
│   ├── decoder.go
│   ├── genesis.go
│   ├── operations.go
│   └── params.go
├── types
│   ├── {module_name}.pb.go
│   ├── codec.go
│   ├── errors.go
│   ├── events.go
│   ├── events.pb.go
│   ├── expected_keepers.go
│   ├── genesis.go
│   ├── genesis.pb.go
│   ├── keys.go
│   ├── msgs.go
│   ├── params.go
│   ├── query.pb.go
│   └── tx.pb.go
├── module.go
├── abci.go
├── autocli.go
├── depinject.go
└── README.md
```
- `client/`: The module's CLI client functionality implementation and the module's CLI testing suite.
- `exported/`: The module's exported types - typically interface types. If a module relies on keepers from another module, it is expected to receive the keepers as interface contracts through the `expected_keepers.go` file (see below) in order to avoid a direct dependency on the module implementing the keepers. However, these interface contracts can define methods that operate on and/or return types that are specific to the module that is implementing the keepers and this is where `exported/` comes into play. The interface types that are defined in `exported/` use canonical types, allowing for the module to receive the keepers as interface contracts through the `expected_keepers.go` file. This pattern allows for code to remain DRY and also alleviates import cycle chaos.
- `keeper/`: The module's `Keeper` and `MsgServer` implementation.
- `abci.go`: The module's `BeginBlocker` and `EndBlocker` implementations (this file is only required if `BeginBlocker` and/or `EndBlocker` need to be defined).
- `simulation/`: The module's [simulation](./14-simulator.md) package defines functions used by the blockchain simulator application (`simapp`).
- `README.md`: The module's specification documents outlining important concepts, state storage structure, and message and event type definitions. Learn more how to write module specs in the [spec guidelines](../spec/SPEC_MODULE.md).
- `types/`: includes type definitions for messages, events, and genesis state, including the type definitions generated by Protocol Buffers.
- `codec.go`: The module's registry methods for interface types.
- `errors.go`: The module's sentinel errors.
- `events.go`: The module's event types and constructors.
- `expected_keepers.go`: The module's [expected keeper](./06-keeper.md#type-definition) interfaces.
- `genesis.go`: The module's genesis state methods and helper functions.
- `keys.go`: The module's store keys and associated helper functions.
- `msgs.go`: The module's message type definitions and associated methods.
- `params.go`: The module's parameter type definitions and associated methods.
- `*.pb.go`: The module's type definitions generated by Protocol Buffers (as defined in the respective `*.proto` files above).
- The root directory includes the module's `AppModule` implementation.
- `autocli.go`: The module [autocli](https://docs.cosmos.network/main/core/autocli) options.
- `depinject.go`: The module [depinject](./15-depinject.md#type-definition) options.
> Note: although the above pattern is followed by most of the Cosmos SDK modules, some modules don't follow it. E.g. `x/group` and `x/nft` don't have a `types` folder; instead, all of the type definitions for messages, events, and genesis state live in the root directory, and the module's `AppModule` implementation lives in the `module` folder.
---
sidebar_position: 1
---
# `Msg` Services
:::note Synopsis
A Protobuf `Msg` service processes [messages](./02-messages-and-queries.md#messages). Protobuf `Msg` services are specific to the module in which they are defined, and only process messages defined within the said module. They are called from `BaseApp` during [`FinalizeBlock`](../../learn/advanced/00-baseapp.md#finalizeblock).
:::
:::note Pre-requisite Readings
- [Module Manager](./01-module-manager.md)
- [Messages and Queries](./02-messages-and-queries.md)
:::
## Implementation of a module `Msg` service
Each module should define a Protobuf `Msg` service, which will be responsible for processing requests (implementing `sdk.Msg`) and returning responses.
As further described in [ADR 031](../architecture/adr-031-msg-service.md), this approach has the advantage of clearly specifying return types and generating server and client code.
Protobuf generates a `MsgServer` interface based on the definition of the `Msg` service. It is the role of the module developer to implement this interface, by implementing the state transition logic that should happen upon receipt of each `transaction.Msg`. As an example, here is the generated `MsgServer` interface for `x/bank`, which exposes two `transaction.Msg`s:
```go reference
https://github.com/cosmos/cosmos-sdk/blob/28fa3b8/x/bank/types/tx.pb.go#L564-L579
```
When possible, the existing module's [`Keeper`](./06-keeper.md) should implement `MsgServer`, otherwise a `msgServer` struct that embeds the `Keeper` can be created, typically in `./keeper/msg_server.go`:
```go reference
https://github.com/cosmos/cosmos-sdk/blob/28fa3b8/x/bank/keeper/msg_server.go#L16-L19
```
`msgServer` methods can retrieve auxiliary information or services using the `Environment`, which is always located in the keeper:
Environment:
```go reference
https://github.com/cosmos/cosmos-sdk/blob/07151304e2ec6a185243d083f59a2d543253cb15/core/appmodule/v2/environment.go#L14-L29
```
Keeper Example:
```go reference
https://github.com/cosmos/cosmos-sdk/blob/07151304e2ec6a185243d083f59a2d543253cb15/x/bank/keeper/keeper.go#L56-L58
```
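As a minimal sketch of that pattern (assuming `appmodule` is `cosmossdk.io/core/appmodule`; the field choices beyond the embedded `Environment` are illustrative):
```go
type Keeper struct {
	appmodule.Environment // store, event, gas, header services, ...

	cdc codec.BinaryCodec
}

func NewKeeper(env appmodule.Environment, cdc codec.BinaryCodec) Keeper {
	return Keeper{Environment: env, cdc: cdc}
}

// currentHeight shows a msgServer (which embeds Keeper) retrieving an
// auxiliary service, here the header service, from the environment.
func (k msgServer) currentHeight(ctx context.Context) int64 {
	return k.HeaderService.HeaderInfo(ctx).Height
}
```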
`transaction.Msg` processing usually follows these 3 steps:
### Validation
The message server must perform all validation required (both _stateful_ and _stateless_) to make sure the `message` is valid.
The `signer` is charged for the gas cost of this validation.
For example, a `msgServer` method for a `transfer` message should check that the sending account has enough funds to actually perform the transfer.
It is recommended to implement all validation checks in a separate function that passes state values as arguments. This implementation simplifies testing. As expected, expensive validation functions charge additional gas. Example:
```go
func ValidateMsgA(msg MsgA, now Time, gm GasMeter) error {
	if now.After(msg.Expire) {
		return sdkerrors.ErrInvalidRequest.Wrap("msg expired")
	}
	gm.ConsumeGas(1000, "signature verification")
	return signatureVerification(msg.Prover, msg.Data)
}
```
:::warning
Previously, the `ValidateBasic` method was used to perform simple and stateless validation checks.
This way of validating is deprecated, which means the `msgServer` must now perform all validation checks.
:::
### State Transition
After the validation is successful, the `msgServer` method uses the [`keeper`](./06-keeper.md) functions to access the state and perform a state transition.
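Putting the steps together, a handler might look like this minimal sketch; `MsgIncrement`, `MsgIncrementResponse`, and the keeper method `SetCount` are hypothetical:
```go
func (k msgServer) Increment(ctx context.Context, msg *MsgIncrement) (*MsgIncrementResponse, error) {
	// 1. validation (stateless and stateful), charged to the signer's gas
	if msg.Amount == 0 {
		return nil, sdkerrors.ErrInvalidRequest.Wrap("amount must be positive")
	}
	// 2. state transition through the keeper's functions
	if err := k.SetCount(ctx, msg.Amount); err != nil {
		return nil, err
	}
	// 3. events are emitted before returning (see the next section)
	return &MsgIncrementResponse{}, nil
}
```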
### Events
Before returning, `msgServer` methods generally emit one or more [events](../../learn/advanced/08-events.md) by using the `EventManager` held in `environment`.
There are two ways to emit events: typed events using protobuf, or arbitrary key-value pairs.
Typed Events:
```go
ctx.EventManager().EmitTypedEvent(
	&group.EventABC{Key1: Value1, Key2: Value2})
```
Arbitrary Events:
```go
ctx.EventManager().EmitEvent(
sdk.NewEvent(
eventType, // e.g. sdk.EventTypeMessage for a message, types.CustomEventType for a custom event defined in the module
sdk.NewAttribute(key1, value1),
sdk.NewAttribute(key2, value2),
),
)
```
These events are relayed back to the underlying consensus engine and can be used by service providers to implement services around the application. Click [here](../../learn/advanced/08-events.md) to learn more about events.
The invoked `msgServer` method returns a `proto.Message` response and an `error`. These return values are then wrapped into an `*sdk.Result` or an `error`:
```go reference
https://github.com/cosmos/cosmos-sdk/blob/v0.50.0-alpha.0/baseapp/msg_service_router.go#L160
```
This method takes care of marshaling the `res` parameter to protobuf and attaching any events on the `EventManager()` to the `sdk.Result`.
```protobuf reference
https://github.com/cosmos/cosmos-sdk/blob/v0.50.0-alpha.0/proto/cosmos/base/abci/v1beta1/abci.proto#L93-L113
```
This diagram shows a typical structure of a Protobuf `Msg` service, and how the message propagates through the module.
```mermaid
sequenceDiagram
participant User
participant baseApp
participant router
participant handler
participant msgServer
participant keeper
participant EventManager
User->>baseApp: Transaction Type<Tx>
baseApp->>router: Route(ctx, msgRoute)
router->>handler: handler
handler->>msgServer: Msg<Tx>(Context, Msg(..))
alt addresses invalid, denominations wrong, etc.
msgServer->>handler: error
handler->>router: error
router->>baseApp: result, error code
else
msgServer->>keeper: perform action, update context
keeper->>msgServer: results, error code
msgServer->>EventManager: Emit relevant events
msgServer->>msgServer: maybe wrap results in more structure
msgServer->>handler: result, error code
handler->>router: result, error code
router->>baseApp: result, error code
end
baseApp->>User: result, error code
```
## Telemetry
New [telemetry metrics](../../learn/advanced/09-telemetry.md) can be created from `msgServer` methods when handling messages.
This is an example from the `x/auth/vesting` module:
```go reference
https://github.com/cosmos/cosmos-sdk/blob/v0.50.0-alpha.0/x/auth/vesting/msg_server.go#L76-L88
```
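As a minimal sketch of such a metric (the metric key and label are illustrative; the `metrics` package import path varies across SDK versions):
```go
import (
	metrics "github.com/hashicorp/go-metrics"

	"github.com/cosmos/cosmos-sdk/telemetry"
)

// recordCreated bumps a counter labelled by denom each time a message of
// this type is successfully handled.
func recordCreated(denom string) {
	telemetry.IncrCounterWithLabels(
		[]string{"tx", "msg", "create"}, // metric key, illustrative
		1,
		[]metrics.Label{telemetry.NewLabel("denom", denom)},
	)
}
```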
:::warning
Telemetry adds a performance overhead to the chain. It is recommended to only use this in critical paths.
:::
---
sidebar_position: 1
---
# Query Services
:::note Synopsis
A Protobuf Query service processes [`queries`](./02-messages-and-queries.md#queries). Query services are specific to the module in which they are defined, and only process `queries` defined within said module. They are called from `BaseApp`'s [`Query` method](../../learn/advanced/00-baseapp.md#query).
:::
:::note Pre-requisite Readings
- [Module Manager](./01-module-manager.md)
- [Messages and Queries](./02-messages-and-queries.md)
:::
## Implementation of a module query service
### gRPC Service
When defining a Protobuf `Query` service, a `QueryServer` interface is generated for each module with all the service methods:
```go
type QueryServer interface {
QueryBalance(context.Context, *QueryBalanceParams) (*types.Coin, error)
QueryAllBalances(context.Context, *QueryAllBalancesParams) (*QueryAllBalancesResponse, error)
}
```
These custom query methods should be implemented by a module's keeper, typically in `./keeper/grpc_query.go`. The first parameter of these methods is a generic `context.Context`; the Cosmos SDK provides the function `sdk.UnwrapSDKContext` to retrieve the `sdk.Context` from it.
Here's an example implementation for the bank module:
```go reference
https://github.com/cosmos/cosmos-sdk/blob/v0.50.0-alpha.0/x/bank/keeper/grpc_query.go
```
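As a minimal sketch (the `QueryCountRequest`/`QueryCountResponse` types and the keeper method `GetCount` are hypothetical; `status` and `codes` are the standard `google.golang.org/grpc` packages):
```go
func (k Keeper) Count(goCtx context.Context, req *QueryCountRequest) (*QueryCountResponse, error) {
	if req == nil {
		return nil, status.Error(codes.InvalidArgument, "empty request")
	}
	ctx := sdk.UnwrapSDKContext(goCtx) // recover the sdk.Context
	return &QueryCountResponse{Count: k.GetCount(ctx)}, nil
}
```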
### Calling queries from the State Machine
The Cosmos SDK v0.47 introduces a new `cosmos.query.v1.module_query_safe` Protobuf annotation, which is used to state that a query is safe to be called from within the state machine. For example:
- a Keeper's query function can be called from another module's Keeper,
- ADR-033 intermodule query calls,
- CosmWasm contracts can also directly interact with these queries.
If the `module_query_safe` annotation is set to `true`, it means:
- The query is deterministic: given a block height it will return the same response upon multiple calls, and doesn't introduce any state-machine breaking changes across SDK patch versions.
- Gas consumption never fluctuates across calls and across patch versions.
If you are a module developer and want to use `module_query_safe` annotation for your own query, you have to ensure the following things:
- the query is deterministic and won't introduce state-machine-breaking changes without coordinated upgrades
- it has its gas tracked, to avoid the attack vector where no gas is accounted for on potentially high-computation queries.
---
sidebar_position: 1
---
# Blockchain Architecture
## Introduction
Blockchain architecture is a complex topic that involves many different components. In this section, we will cover the main layers of a blockchain application built with the Cosmos SDK.
At its core, a blockchain is a replicated deterministic state machine. This document explores the various layers of blockchain architecture, focusing on the execution, settlement, consensus, data availability, and interoperability layers.
```mermaid
graph TD
A[Modular SDK Blockchain Architecture]
A --> B[Execution Layer]
A --> C[Settlement Layer]
A --> D[Consensus Layer]
D --> E[Data Availability Layer]
A --> F[Interoperability Layer]
```
## Layered Architecture
Understanding blockchain architecture through the lens of different layers helps in comprehending its complex functionalities. We will give a high-level overview of the execution layer, settlement layer, consensus layer, data availability layer, and interoperability layer.
## Execution Layer
The Execution Layer is where the blockchain processes and executes transactions. The state machine within the blockchain handles the execution of transaction logic. This is done by the blockchain itself, ensuring that every transaction follows the predefined rules and state transitions. When a transaction is submitted, the execution layer processes it, updates the state, and ensures that the output is deterministic and consistent across all nodes. In the context of the Cosmos SDK, this typically involves predefined modules and transaction types rather than general-purpose smart contracts, which are used in chains with CosmWasm.
### State machine
At its core, a blockchain is a [replicated deterministic state machine](https://en.wikipedia.org/wiki/State_machine_replication).
A state machine is a computer science concept whereby a machine can have multiple states, but only one at any given time. There is a `state`, which describes the current state of the system, and `transactions`, that trigger state transitions.
Given a state S and a transaction T, the state machine will return a new state S'.
```mermaid
flowchart LR
A[S]
B[S']
A -->|"apply(T)"| B
```
In practice, the transactions are bundled in blocks to make the process more efficient. Given a state S and a block of transactions B, the state machine will return a new state S'.
```mermaid
flowchart LR
A[S]
B[S']
A -->|"For each T in B: apply(T)"| B
```
In a blockchain context, the state machine is [deterministic](https://en.wikipedia.org/wiki/Deterministic_system). This means that if a node is started at a given state and replays the same sequence of transactions, it will always end up with the same final state.
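As a toy sketch of this model (the types and the transfer rule are illustrative), `apply` is a pure function, so replaying the same transactions from the same state always reproduces the same result:
```go
// State maps account -> balance; a Tx moves Amount between two accounts.
type State map[string]int64

type Tx struct {
	From, To string
	Amount   int64
}

// apply is the deterministic transition function: S' = apply(S, T).
func apply(s State, t Tx) State {
	next := make(State, len(s))
	for k, v := range s {
		next[k] = v
	}
	next[t.From] -= t.Amount
	next[t.To] += t.Amount
	return next
}

// applyBlock folds a block of transactions over the state.
func applyBlock(s State, block []Tx) State {
	for _, t := range block {
		s = apply(s, t)
	}
	return s
}
```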
The Cosmos SDK gives developers maximum flexibility to define the state of their application, transaction types and state transition functions. The process of building state machines with the Cosmos SDK will be described more in-depth in the following sections. But first, let us see how the state machine is replicated using various consensus engines, such as CometBFT.
## Settlement Layer
The Settlement Layer is responsible for finalising and recording transactions on the blockchain. This layer ensures that all transactions are accurately settled and immutable, providing a verifiable record of all activities on the blockchain. It is critical for maintaining the integrity and trustworthiness of the blockchain.
The settlement layer can be performed on the chain itself, or it can be externalised, allowing for the possibility of plugging in a different settlement layer as needed. For example, if we were to use Rollkit and Celestia for data availability and consensus, we could separate our settlement layer by introducing fraud or validity proofs. From there, the settlement layer can create trust-minimised light clients, further enhancing security and efficiency. This process ensures that all transactions are accurately finalised and immutable, providing a verifiable record of all activities.
## Consensus Layer
The Consensus Layer ensures that all nodes in the network agree on the order and validity of transactions. This layer uses consensus algorithms like Byzantine Fault Tolerance (BFT) or Proof of Stake (PoS) to achieve agreement, even in the presence of malicious nodes. Consensus is crucial for maintaining the security and reliability of the blockchain.
CometBFT has been the default consensus engine in the Cosmos SDK. In the most recent releases, we have been moving away from this and allowing users to plug in their own consensus engines. This is a big step forward for the Cosmos SDK, as it allows for more flexibility and customisation. Another consensus engine option, for example, is Rollkit with Celestia's Data Availability Layer.
Here is an example of how the consensus layer works with CometBFT in the context of the Cosmos SDK:
### CometBFT
Thanks to the Cosmos SDK, developers just have to define the state machine, and [_CometBFT_](https://docs.cometbft.com/v1.0/explanation/introduction/) will handle replication over the network for them.
```mermaid
flowchart TD
subgraph Blockchain_Node[Blockchain Node]
subgraph SM[State-machine]
direction TB
SM1[Cosmos SDK]
end
subgraph CometBFT[CometBFT]
direction TB
Consensus
Networking
end
end
SM <--> CometBFT
Blockchain_Node -->|Includes| SM
Blockchain_Node -->|Includes| CometBFT
```
[CometBFT](https://docs.cometbft.com/v1.0/explanation/introduction/) is an application-agnostic engine that is responsible for handling the _networking_ and _consensus_ layers of a blockchain. In practice, this means that CometBFT is responsible for propagating and ordering transaction bytes. CometBFT relies on an eponymous Byzantine-Fault-Tolerant (BFT) algorithm to reach consensus on the order of transactions.
The [consensus algorithm adopted by CometBFT](https://docs.cometbft.com/v1.0/explanation/introduction/#consensus-overview) works with a set of special nodes called _Validators_. Validators are responsible for adding blocks of transactions to the blockchain. At any given block, there is a validator set V. A validator in V is chosen by the algorithm to be the proposer of the next block. This block is considered valid if more than two thirds of V signed a `prevote` and a `precommit` on it, and if all the transactions that it contains are valid. The validator set can be changed by rules written in the state-machine.
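As a sketch, the two-thirds rule reduces to a simple voting-power comparison (illustrative only; the real algorithm also tracks the prevote and precommit rounds):
```go
// hasQuorum reports whether the signed voting power exceeds 2/3 of the
// total, using integer arithmetic to avoid rounding errors.
func hasQuorum(signedPower, totalPower int64) bool {
	return signedPower*3 > totalPower*2
}
```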
## ABCI
CometBFT passes transactions to the application through an interface called the [ABCI](https://docs.cometbft.com/v1.0/spec/abci/), which the application must implement.
```mermaid
graph TD
A[Application]
B[CometBFT]
A <-->|ABCI| B
```
Note that **CometBFT only handles transaction bytes**. It has no knowledge of what these bytes mean. All CometBFT does is order these transaction bytes deterministically. CometBFT passes the bytes to the application via the ABCI, and expects a return code to inform it if the messages contained in the transactions were successfully processed or not.
Here are the most important messages of the ABCI:
- `CheckTx`: When a transaction is received by CometBFT, it is passed to the application to check if a few basic requirements are met. `CheckTx` is used to protect the mempool of full-nodes against spam transactions. A special handler called the [`AnteHandler`](../beginner/04-gas-fees.md#antehandler) is used to execute a series of validation steps such as checking for sufficient fees and validating the signatures. If the checks are valid, the transaction is added to the [mempool](https://docs.cometbft.com/v1.0/explanation/core/mempool) and relayed to peer nodes. Note that transactions are not processed (i.e. no modification of the state occurs) with `CheckTx` since they have not been included in a block yet.
- `DeliverTx`: When a [valid block](https://docs.cometbft.com/v1.0/spec/core/data_structures#block) is received by CometBFT, each transaction in the block is passed to the application via `DeliverTx` in order to be processed. It is during this stage that the state transitions occur. The `AnteHandler` executes again, along with the actual [`Msg` service](../../build/building-modules/03-msg-services.md) RPC for each message in the transaction.
- `BeginBlock`/`EndBlock`: These messages are executed at the beginning and the end of each block, whether the block contains transactions or not. It is useful to trigger automatic execution of logic. Proceed with caution though, as computationally expensive loops could slow down your blockchain, or even freeze it if the loop is infinite.
Find a more detailed view of the ABCI methods from the [CometBFT docs](https://docs.cometbft.com/v1.0/spec/abci/).
Any application built on CometBFT needs to implement the ABCI interface in order to communicate with the underlying local CometBFT engine. Fortunately, you do not have to implement the ABCI interface. The Cosmos SDK provides a boilerplate implementation of it in the form of [baseapp](./03-sdk-design.md#baseapp).
## Data Availability Layer
The Data Availability (DA) Layer is a critical component within the umbrella of the consensus layer that ensures all necessary data for transactions is available to all network participants. This layer is essential for preventing data withholding attacks, where some nodes might attempt to disrupt the network by not sharing critical transaction data.
If we use the example of Rollkit, a user initiates a transaction, which is then propagated through the rollup network by a light node. The transaction is validated by full nodes and aggregated into a block by the sequencer. This block is posted to a data availability layer like Celestia, ensuring the data is accessible and correctly ordered. The rollup light node verifies data availability from the DA layer. Full nodes then validate the block and generate necessary proofs, such as fraud proofs for optimistic rollups or zk-SNARKs/zk-STARKs for zk-rollups. These proofs are shared across the network and verified by other nodes, ensuring the rollup's integrity. Once all validations are complete, the rollup's state is updated, finalising the transaction.
## Interoperability Layer
The Interoperability Layer enables communication and interaction between different blockchains. This layer facilitates cross-chain transactions and data sharing, allowing various blockchain networks to interoperate seamlessly. Interoperability is key for building a connected ecosystem of blockchains, enhancing their functionality and reach.
In this case, we have separated the layers even further to illustrate the components that make up the blockchain architecture. It is important to note that the Cosmos SDK is designed to be interoperable with other blockchains. This is achieved through the use of the [Inter-Blockchain Communication (IBC) protocol](https://www.ibcprotocol.dev/), which allows different blockchains to communicate and transfer assets between each other.
---
sidebar_position: 1
---
# Application-Specific Blockchains
:::note Synopsis
This document explains what application-specific blockchains are, and why developers would want to build one as opposed to writing Smart Contracts.
:::
## What are application-specific blockchains
Application-specific blockchains are blockchains customized to operate a single application. Instead of building a decentralized application on top of an underlying blockchain like Ethereum, developers build their own blockchain from the ground up. This means building a full-node client, a light-client, and all the necessary interfaces (CLI, REST, ...) to interact with the nodes.
```mermaid
flowchart TD
subgraph Blockchain_Node[Blockchain Node]
subgraph SM[State-machine]
direction TB
SM1[Cosmos SDK]
end
subgraph Consensus[Consensus]
direction TB
end
subgraph Networking[Networking]
direction TB
end
end
SM <--> Consensus
Consensus <--> Networking
Blockchain_Node -->|Includes| SM
Blockchain_Node -->|Includes| Consensus
Blockchain_Node -->|Includes| Networking
```
## What are the shortcomings of Smart Contracts
Virtual-machine blockchains like Ethereum addressed the demand for more programmability back in 2014. At the time, the options available for building decentralized applications were quite limited. Most developers would build on top of the complex and limited Bitcoin scripting language, or fork the Bitcoin codebase which was hard to work with and customize.
Virtual-machine blockchains came in with a new value proposition. Their state-machine incorporates a virtual-machine that is able to interpret turing-complete programs called Smart Contracts. These Smart Contracts are very good for use cases like one-time events (e.g. ICOs), but they can fall short for building complex decentralized platforms. Here is why:
- Smart Contracts are generally developed with specific programming languages that can be interpreted by the underlying virtual-machine. These programming languages are often immature and inherently limited by the constraints of the virtual-machine itself. For example, the Ethereum Virtual Machine does not allow developers to implement automatic execution of code. Developers are also limited to the account-based system of the EVM, and they can only choose from a limited set of functions for their cryptographic operations. These are examples, but they hint at the lack of **flexibility** that a smart contract environment often entails.
- Smart Contracts are all run by the same virtual machine. This means that they compete for resources, which can severely restrain **performance**. And even if the state-machine were to be split in multiple subsets (e.g. via sharding), Smart Contracts would still need to be interpreted by a virtual machine, which would limit performance compared to a native application implemented at state-machine level (our benchmarks show an improvement on the order of 10x in performance when the virtual-machine is removed).
- Another issue with the fact that Smart Contracts share the same underlying environment is the resulting limitation in **sovereignty**. A decentralized application is an ecosystem that involves multiple players. If the application is built on a general-purpose virtual-machine blockchain, stakeholders have very limited sovereignty over their application, and are ultimately superseded by the governance of the underlying blockchain. If there is a bug in the application, very little can be done about it.
Application-Specific Blockchains are designed to address these shortcomings.
## Application-Specific Blockchains Benefits
### Flexibility
Application-specific blockchains give maximum flexibility to developers:
- In Cosmos blockchains, the state-machine is typically connected to the underlying consensus engine via an interface called the [ABCI](https://docs.cometbft.com/v1.0/spec/abci/) (Application Blockchain Interface). This interface can be wrapped in any programming language, meaning developers can build their state-machine in the programming language of their choice.
- Developers can choose among multiple frameworks to build their state-machine. The most widely used today is the Cosmos SDK, but others exist (e.g. [Lotion](https://github.com/nomic-io/lotion), [Weave](https://github.com/iov-one/weave), ...). Typically the choice will be made based on the programming language they want to use (Cosmos SDK and Weave are in Golang, Lotion is in Javascript, ...).
- The ABCI also allows developers to swap the consensus engine of their application-specific blockchain. Today, only CometBFT is production-ready, but in the future other consensus engines are expected to emerge.
- Even when they settle for a framework and consensus engine, developers still have the freedom to tweak them if they don't perfectly match their requirements in their pristine forms.
- Developers are free to explore the full spectrum of tradeoffs (e.g. number of validators vs transaction throughput, safety vs availability in asynchrony, ...) and design choices (DB or IAVL tree for storage, UTXO or account model, ...).
- Developers can implement automatic execution of code. In the Cosmos SDK, logic can be automatically triggered at the beginning and the end of each block. They are also free to choose the cryptographic library used in their application, as opposed to being constrained by what is made available by the underlying environment in the case of virtual-machine blockchains.
The list above contains a few examples that show how much flexibility application-specific blockchains give to developers. The goal of Cosmos and the Cosmos SDK is to make developer tooling as generic and composable as possible, so that each part of the stack can be forked, tweaked and improved without losing compatibility. As the community grows, more alternatives for each of the core building blocks will emerge, giving more options to developers.
### Performance
Decentralized applications built with Smart Contracts are inherently capped in performance by the underlying environment. For a decentralized application to optimise performance, it needs to be built as an application-specific blockchain. Next are some of the benefits an application-specific blockchain brings in terms of performance:
- Developers of application-specific blockchains can choose to operate with a novel consensus engine such as CometBFT.
- An application-specific blockchain only operates a single application, so that the application does not compete with others for computation and storage. This is the opposite of most non-sharded virtual-machine blockchains today, where smart contracts all compete for computation and storage.
- Even if a virtual-machine blockchain offered application-based sharding coupled with an efficient consensus algorithm, performance would still be limited by the virtual-machine itself. The real throughput bottleneck is the state-machine, and requiring transactions to be interpreted by a virtual-machine significantly increases the computational complexity of processing them.
### Security
Security is hard to quantify, and greatly varies from platform to platform. That said, here are some important benefits an application-specific blockchain can bring in terms of security:
- Developers can choose proven programming languages like Go when building their application-specific blockchains, as opposed to smart contract programming languages that are often more immature.
- Developers are not constrained by the cryptographic functions made available by the underlying virtual-machines. They can use their own custom cryptography, and rely on well-audited crypto libraries.
- Developers do not have to worry about potential bugs or exploitable mechanisms in the underlying virtual-machine, making it easier to reason about the security of the application.
### Sovereignty
One of the major benefits of application-specific blockchains is sovereignty. A decentralized application is an ecosystem that involves many actors: users, developers, third-party services, and more. When developers build on a virtual-machine blockchain where many decentralized applications coexist, the community of the application is different from the community of the underlying blockchain, and the latter supersedes the former in the governance process. If there is a bug or if a new feature is needed, stakeholders of the application have very little leeway to upgrade the code. If the community of the underlying blockchain refuses to act, nothing can happen.
The fundamental issue here is that the governance of the application and the governance of the network are not aligned. This issue is solved by application-specific blockchains. Because application-specific blockchains specialize to operate a single application, stakeholders of the application have full control over the entire chain. This ensures that the community will not be stuck if a bug is discovered, and that it has the freedom to choose how it is going to evolve.
# Interchain Accounts
:::note Synopsis
Learn about what the Interchain Accounts module is
:::
## What is the Interchain Accounts module?
Interchain Accounts is the Cosmos SDK implementation of the ICS-27 protocol, which enables cross-chain account management built upon IBC.
- How does an interchain account differ from a regular account?
Regular accounts use a private key to sign transactions. Interchain Accounts are instead controlled programmatically by counterparty chains via IBC packets.
## Concepts
`Host Chain`: The chain where the interchain account is registered. The host chain listens for IBC packets from a controller chain, which should contain instructions (e.g. Cosmos SDK messages) that the interchain account will execute.
`Controller Chain`: The chain registering and controlling an account on a host chain. The controller chain sends IBC packets to the host chain to control the account.
`Interchain Account`: An account on a host chain created using the ICS-27 protocol. An interchain account has all the capabilities of a normal account. However, rather than signing transactions with a private key, a controller chain will send IBC packets to the host chain which signals what transactions the interchain account should execute.
`Authentication Module`: A custom application module on the controller chain that uses the Interchain Accounts module to build custom logic for the creation & management of interchain accounts. It can be either an IBC application module using the [legacy API](10-legacy/03-keeper-api.md), or a regular Cosmos SDK application module sending messages to the controller submodule's `MsgServer` (this is the recommended approach from ibc-go v6 if access to packet callbacks is not needed). Please note that the legacy API will eventually be removed and IBC applications will not be able to use them in later releases.
## SDK security model
SDK modules on a chain are assumed to be trustworthy. For example, there are no checks to prevent an untrustworthy module from accessing the bank keeper.
The implementation of ICS-27 in ibc-go uses this assumption in its security considerations.
The implementation assumes other IBC application modules will not bind to ports within the ICS-27 namespace.
## Channel Closure
The provided interchain account host and controller implementations do not support `ChanCloseInit`. However, they do support `ChanCloseConfirm`.
This means that the host and controller modules cannot close channels, but they will confirm channel closures initiated by other implementations of ICS-27.
In the event of a channel closing (due to a packet timeout in an ordered channel, for example), the interchain account associated with that channel can become accessible again if a new channel is created with a (JSON-formatted) version string that encodes the exact same `Metadata` information of the previous channel. The channel can be reopened using either [`MsgRegisterInterchainAccount`](./05-messages.md#msgregisterinterchainaccount) or `MsgChannelOpenInit`. If `MsgRegisterInterchainAccount` is used, then it is possible to leave the `version` field of the message empty, since it will be filled in by the controller submodule. If `MsgChannelOpenInit` is used, then the `version` field must be provided with the correct JSON-encoded `Metadata` string. See section [Understanding Active Channels](./09-active-channels.md#understanding-active-channels) for more information.
When reopening a channel with the default controller submodule, the ordering of the channel cannot be changed. In order to change the ordering of the channel, the channel has to go through a [channel upgrade handshake](../../01-ibc/06-channel-upgrades.md) or reopen the channel with a custom controller implementation.
---
title: Overview
---
# Overview
:::note Synopsis
Learn about what the Fee Middleware module is, and how to build custom modules that utilize the Fee Middleware functionality
:::
## What is the Fee Middleware module?
IBC does not depend on relayer operators for transaction verification. However, the relayer infrastructure ensures liveness of the Interchain network — operators listen for packets sent through channels opened between chains, and perform the vital service of ferrying these packets (and proof of the transaction on the sending chain/receipt on the receiving chain) to the clients on each side of the channel.
Though relaying is permissionless and completely decentralized and accessible, it does come with operational costs. Running full nodes to query transaction proofs and paying for transaction fees associated with IBC packets are two of the primary cost burdens which have driven the overall discussion on **a general, in-protocol incentivization mechanism for relayers**.
Initially, a [simple proposal](https://github.com/cosmos/ibc/pull/577/files) was created to incentivize relaying on ICS20 token transfers on the destination chain. However, the proposal was specific to ICS20 token transfers and would have to be reimplemented in this format on every other IBC application module.
After much discussion, the proposal was expanded to a [general incentivisation design](https://github.com/cosmos/ibc/tree/master/spec/app/ics-029-fee-payment) that can be adopted by any ICS application protocol as [middleware](../../01-ibc/04-middleware/02-develop.md).
## Concepts
ICS29 fee payments in this middleware design are built on the assumption that sender chains are the source of incentives — the chain on which packets are incentivized is the chain that distributes fees to relayer operators. However, as part of the IBC packet flow, messages have to be submitted on both sender and destination chains. This introduces the requirement of a mapping of relayer operator's addresses on both chains.
To achieve the stated requirements, the **fee middleware module has two main groups of functionality**:
- Registering of relayer addresses associated with each party involved in relaying the packet on the source chain. This registration process can be automated on start up of relayer infrastructure and happens only once, not every packet flow.
This is described in the [Fee distribution section](04-fee-distribution.md).
- Escrowing fees by any party which will be paid out to each rightful party on completion of the packet lifecycle.
This is described in the [Fee messages section](03-msgs.md).
We complete the introduction by giving a list of definitions of relevant terminology.
`Forward relayer`: The relayer that submits the `MsgRecvPacket` message for a given packet (on the destination chain).
`Reverse relayer`: The relayer that submits the `MsgAcknowledgement` message for a given packet (on the source chain).
`Timeout relayer`: The relayer that submits the `MsgTimeout` or `MsgTimeoutOnClose` messages for a given packet (on the source chain).
`Payee`: The account address on the source chain to be paid on completion of the packet lifecycle. The packet lifecycle on the source chain completes with the receipt of a `MsgTimeout`/`MsgTimeoutOnClose` or a `MsgAcknowledgement`.
`Counterparty payee`: The account address to be paid on completion of the packet lifecycle on the destination chain. The packet lifecycle on the destination chain completes with a successful `MsgRecvPacket`.
`Refund address`: The address of the account paying for the incentivization of packet relaying. The account is refunded timeout fees upon successful acknowledgement. In the event of a packet timeout, both acknowledgement and receive fees are refunded.
## Known Limitations
- At the time of the release of the feature (ibc-go v4), the fee payments middleware only supported incentivisation of new channels; however, with the release of channel upgradeability (ibc-go v8.1), it is possible to enable incentivisation of all existing channels.
- Even though unlikely, there exists a DoS attack vector on a fee-enabled channel if 1) there exists a relayer software implementation that is incentivised to timeout packets if the timeout fee is greater than the sum of the fees to receive and acknowledge the packet, and 2) only this type of implementation is used by operators relaying on the channel. In this situation, an attacker could continuously incentivise the relayers to never deliver the packets by incrementing the timeout fee of the packets above the sum of the receive and acknowledge fees. However, this situation is unlikely to occur because 1) another relayer behaving honestly could relay the packets before they timeout, and 2) the attack would be costly because the attacker would need to incentivise the timeout fee of the packets with their own funds. Given the low impact and unlikelihood of the attack we have decided to accept this risk and not implement any mitigation measures.
## Module Integration
The Fee Middleware module, as the name suggests, plays the role of an IBC middleware and as such must be configured by chain developers to route and handle IBC messages correctly.
For Cosmos SDK chains this setup is done via the `app/app.go` file, where modules are constructed and configured in order to bootstrap the blockchain application.
## Example integration of the Fee Middleware module
```go
// app.go
// Register the AppModule for the fee middleware module
ModuleBasics = module.NewBasicManager(
...
ibcfee.AppModuleBasic{},
...
)
...
// Add module account permissions for the fee middleware module
maccPerms = map[string][]string{
...
ibcfeetypes.ModuleName: nil,
}
...
// Add fee middleware Keeper
type App struct {
...
IBCFeeKeeper ibcfeekeeper.Keeper
...
}
...
// Create store keys
keys := sdk.NewKVStoreKeys(
...
ibcfeetypes.StoreKey,
...
)
...
app.IBCFeeKeeper = ibcfeekeeper.NewKeeper(
appCodec, keys[ibcfeetypes.StoreKey],
app.IBCKeeper.ChannelKeeper, // may be replaced with IBC middleware
app.IBCKeeper.ChannelKeeper,
&app.IBCKeeper.PortKeeper, app.AccountKeeper, app.BankKeeper,
)
// See the section below for configuring an application stack with the fee middleware module
...
// Register fee middleware AppModule
app.moduleManager = module.NewManager(
...
ibcfee.NewAppModule(app.IBCFeeKeeper),
)
...
// Add fee middleware to begin blocker logic
app.moduleManager.SetOrderBeginBlockers(
...
ibcfeetypes.ModuleName,
...
)
// Add fee middleware to end blocker logic
app.moduleManager.SetOrderEndBlockers(
...
ibcfeetypes.ModuleName,
...
)
// Add fee middleware to init genesis logic
app.moduleManager.SetOrderInitGenesis(
...
ibcfeetypes.ModuleName,
...
)
```
## Configuring an application stack with Fee Middleware
As mentioned in [IBC middleware development](../../01-ibc/04-middleware/02-develop.md) an application stack may be composed of many or no middlewares that nest a base application.
These layers form the complete set of application logic that enable developers to build composable and flexible IBC application stacks.
For example, an application stack may be just a single base application like `transfer`; however, the same application stack composed with `29-fee` will nest the `transfer` base application
by wrapping it with the Fee Middleware module.
### Transfer
See below for an example of how to create an application stack using `transfer` and `29-fee`.
The following `transferStack` is configured in `app/app.go` and added to the IBC `Router`.
The in-line comments describe the execution flow of packets between the application stack and IBC core.
```go
// Create Transfer Stack
// SendPacket, since it is originating from the application to core IBC:
// transferKeeper.SendPacket -> fee.SendPacket -> channel.SendPacket
// RecvPacket, message that originates from core IBC and goes down to app, the flow is the other way
// channel.RecvPacket -> fee.OnRecvPacket -> transfer.OnRecvPacket
// transfer stack contains (from top to bottom):
// - IBC Fee Middleware
// - Transfer
// create IBC module from bottom to top of stack
var transferStack porttypes.IBCModule
transferStack = transfer.NewIBCModule(app.TransferKeeper)
transferStack = ibcfee.NewIBCMiddleware(transferStack, app.IBCFeeKeeper)
// Add transfer stack to IBC Router
ibcRouter.AddRoute(ibctransfertypes.ModuleName, transferStack)
```
### Interchain Accounts
See below for an example of how to create an application stack using `27-interchain-accounts` and `29-fee`.
The following `icaControllerStack` and `icaHostStack` are configured in `app/app.go` and added to the IBC `Router` with the associated authentication module.
The in-line comments describe the execution flow of packets between the application stack and IBC core.
```go
// Create Interchain Accounts Stack
// SendPacket, since it is originating from the application to core IBC:
// icaAuthModuleKeeper.SendTx -> icaController.SendPacket -> fee.SendPacket -> channel.SendPacket
// initialize ICA module with mock module as the authentication module on the controller side
var icaControllerStack porttypes.IBCModule
icaControllerStack = ibcmock.NewIBCModule(&mockModule, ibcmock.NewMockIBCApp("", scopedICAMockKeeper))
app.ICAAuthModule = icaControllerStack.(ibcmock.IBCModule)
icaControllerStack = icacontroller.NewIBCMiddleware(icaControllerStack, app.ICAControllerKeeper)
icaControllerStack = ibcfee.NewIBCMiddleware(icaControllerStack, app.IBCFeeKeeper)
// RecvPacket, message that originates from core IBC and goes down to app, the flow is:
// channel.RecvPacket -> fee.OnRecvPacket -> icaHost.OnRecvPacket
var icaHostStack porttypes.IBCModule
icaHostStack = icahost.NewIBCModule(app.ICAHostKeeper)
icaHostStack = ibcfee.NewIBCMiddleware(icaHostStack, app.IBCFeeKeeper)
// Add authentication module, controller and host to IBC router
ibcRouter.
// the ICA Controller middleware needs to be explicitly added to the IBC Router because the
// ICA controller module owns the port capability for ICA. The ICA authentication module
// owns the channel capability.
	AddRoute(ibcmock.ModuleName+icacontrollertypes.SubModuleName, icaControllerStack). // ica with mock auth module stack route to ica (top level of middleware stack)
	AddRoute(icacontrollertypes.SubModuleName, icaControllerStack).
	AddRoute(icahosttypes.SubModuleName, icaHostStack)
```
## Fee Distribution
Packet fees are divided into three distinct amounts in order to compensate relayer operators for packet relaying on fee-enabled IBC channels (see the sketch after this list).
- `RecvFee`: The sum of all packet receive fees distributed to a payee for successful execution of `MsgRecvPacket`.
- `AckFee`: The sum of all packet acknowledgement fees distributed to a payee for successful execution of `MsgAcknowledgement`.
- `TimeoutFee`: The sum of all packet timeout fees distributed to a payee for successful execution of `MsgTimeout`.
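A minimal sketch of constructing these three amounts with the `29-fee` types (the import path's major version and the coin amounts are assumptions):
```go
import (
	sdk "github.com/cosmos/cosmos-sdk/types"
	ibcfeetypes "github.com/cosmos/ibc-go/v8/modules/apps/29-fee/types"
)

func examplePacketFee() ibcfeetypes.Fee {
	return ibcfeetypes.Fee{
		RecvFee:    sdk.NewCoins(sdk.NewInt64Coin("stake", 100)), // paid for MsgRecvPacket
		AckFee:     sdk.NewCoins(sdk.NewInt64Coin("stake", 50)),  // paid for MsgAcknowledgement
		TimeoutFee: sdk.NewCoins(sdk.NewInt64Coin("stake", 50)),  // paid for MsgTimeout
	}
}
```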
## Register a counterparty payee address for forward relaying
As mentioned in [ICS29 Concepts](01-overview.md#concepts), the forward relayer describes the actor who performs the submission of `MsgRecvPacket` on the destination chain.
Fee distribution for incentivized packet relays takes place on the packet source chain.
> Relayer operators are expected to register a counterparty payee address, in order to be compensated accordingly with `RecvFee`s upon completion of a packet lifecycle.
The counterparty payee address registered on the destination chain is encoded into the packet acknowledgement and communicated as such to the source chain for fee distribution.
**If a counterparty payee is not registered for the forward relayer on the destination chain, the escrowed fees will be refunded upon fee distribution.**
### Relayer operator actions
A transaction must be submitted **to the destination chain** including a `CounterpartyPayee` address of an account on the source chain.
The transaction must be signed by the `Relayer`.
Note: If a module account address is used as the `CounterpartyPayee` but the module has been set as a blocked address in the `BankKeeper`, the refunding to the module account will fail. This is because many modules use invariants to compare internal tracking of module account balances against the actual balance of the account stored in the `BankKeeper`. If a token transfer to the module account occurs without going through this module and updating the account balance of the module on the `BankKeeper`, then invariants may break and unknown behaviour could occur depending on the module implementation. Therefore, if it is desirable to use a module account that is currently blocked, the module developers should be consulted to gauge the possibility of removing the module account from the blocked list.
```go
type MsgRegisterCounterpartyPayee struct {
// unique port identifier
PortId string
// unique channel identifier
ChannelId string
// the relayer address
Relayer string
// the counterparty payee address
CounterpartyPayee string
}
```
> This message is expected to fail if:
>
> - `PortId` is invalid (see [24-host naming requirements](https://github.com/cosmos/ibc/blob/master/spec/core/ics-024-host-requirements/README.md#paths-identifiers-separators)).
> - `ChannelId` is invalid (see [24-host naming requirements](https://github.com/cosmos/ibc/blob/master/spec/core/ics-024-host-requirements/README.md#paths-identifiers-separators)).
> - `Relayer` is an invalid address (see [Cosmos SDK Addresses](https://github.com/cosmos/cosmos-sdk/blob/main/docs/learn/beginner/03-accounts.md#addresses)).
> - `CounterpartyPayee` is empty or contains more than 2048 bytes.
See below for an example CLI command:
```bash
simd tx ibc-fee register-counterparty-payee transfer channel-0 \
cosmos1rsp837a4kvtgp2m4uqzdge0zzu6efqgucm0qdh \
osmo1v5y0tz01llxzf4c2afml8s3awue0ymju22wxx2 \
--from cosmos1rsp837a4kvtgp2m4uqzdge0zzu6efqgucm0qdh
```
## Register an alternative payee address for reverse and timeout relaying
As mentioned in [ICS29 Concepts](01-overview.md#concepts), the reverse relayer describes the actor who performs the submission of `MsgAcknowledgement` on the source chain.
Similarly the timeout relayer describes the actor who performs the submission of `MsgTimeout` (or `MsgTimeoutOnClose`) on the source chain.
> Relayer operators **may choose** to register an optional payee address, in order to be compensated accordingly with `AckFee`s and `TimeoutFee`s upon completion of a packet lifecycle.
If a payee is not registered for the reverse or timeout relayer on the source chain, then fee distribution assumes the default behaviour, where fees are paid out to the relayer account which delivers `MsgAcknowledgement` or `MsgTimeout`/`MsgTimeoutOnClose`.
### Relayer operator actions
A transaction must be submitted **to the source chain** including a `Payee` address of an account on the source chain.
The transaction must be signed by the `Relayer`.
Note: If a module account address is used as the `Payee` it is recommended to [turn off invariant checks](https://github.com/cosmos/ibc-go/blob/v7.0.0/testing/simapp/app.go#L727) for that module.
```go
type MsgRegisterPayee struct {
// unique port identifier
PortId string
// unique channel identifier
ChannelId string
// the relayer address
Relayer string
// the payee address
Payee string
}
```
> This message is expected to fail if:
>
> - `PortId` is invalid (see [24-host naming requirements](https://github.com/cosmos/ibc/blob/master/spec/core/ics-024-host-requirements/README.md#paths-identifiers-separators)).
> - `ChannelId` is invalid (see [24-host naming requirements](https://github.com/cosmos/ibc/blob/master/spec/core/ics-024-host-requirements/README.md#paths-identifiers-separators)).
> - `Relayer` is an invalid address (see [Cosmos SDK Addresses](https://github.com/cosmos/cosmos-sdk/blob/main/docs/learn/beginner/03-accounts.md#addresses)).
> - `Payee` is an invalid address (see [Cosmos SDK Addresses](https://github.com/cosmos/cosmos-sdk/blob/main/docs/learn/beginner/03-accounts.md#addresses)).
See below for an example CLI command:
```bash
simd tx ibc-fee register-payee transfer channel-0 \
cosmos1rsp837a4kvtgp2m4uqzdge0zzu6efqgucm0qdh \
cosmos153lf4zntqt33a4v0sm5cytrxyqn78q7kz8j8x5 \
--from cosmos1rsp837a4kvtgp2m4uqzdge0zzu6efqgucm0qdh
```

View File

@ -1,178 +0,0 @@
---
title: Overview
---
# Overview
:::note Synopsis
Learn about what the token Transfer module is
:::
## What is the Transfer module?
Transfer is the Cosmos SDK implementation of the [ICS-20](https://github.com/cosmos/ibc/tree/master/spec/app/ics-020-fungible-token-transfer) protocol, which enables cross-chain fungible token transfers.
## Concepts
### Acknowledgements
ICS20 uses the recommended acknowledgement format as specified by [ICS 04](https://github.com/cosmos/ibc/tree/master/spec/core/ics-004-channel-and-packet-semantics#acknowledgement-envelope).
A successful receive of a transfer packet will result in a Result Acknowledgement being written
with the value `[]byte{byte(1)}` in the `Response` field.
An unsuccessful receive of a transfer packet will result in an Error Acknowledgement being written
with the error message in the `Response` field.
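In code, these acknowledgements are constructed with the `channeltypes` helpers; a minimal sketch, assuming the ibc-go v8 import path:
```go
package main

import (
	"fmt"

	channeltypes "github.com/cosmos/ibc-go/v8/modules/core/04-channel/types"
)

func main() {
	// Success: the Response field carries []byte{byte(1)}.
	ack := channeltypes.NewResultAcknowledgement([]byte{byte(1)})
	fmt.Println(ack.Success()) // true

	// Failure: the Response field carries the error message.
	errAck := channeltypes.NewErrorAcknowledgement(fmt.Errorf("insufficient funds"))
	fmt.Println(errAck.Success()) // false
}
```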
### Denomination trace
The denomination trace corresponds to the information that allows a token to be traced back to its
origin chain. It contains a sequence of port and channel identifiers ordered from the most recent to
the oldest in the timeline of transfers.
This information is included in the token's base denomination field in the form of a hash, to prevent an
unbounded denomination length. For example, the token `transfer/channelToA/uatom` will be displayed
as `ibc/7F1D3FCF4AE79E1554D670D1AD949A9BA4E4A3C76C63093E17E446A46061A7A2`. The human-readable denomination
is stored using the `x/bank` module's [denom metadata](https://docs.cosmos.network/main/build/modules/bank#denom-metadata)
feature. You may display the human-readable denominations by querying balances with the `--resolve-denom` flag, as in:
```shell
simd query bank balances [address] --resolve-denom
```
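The displayed hash is straightforward to recompute; a minimal sketch using only the standard library (per ADR 001, SHA-256 over the full denom path, rendered as upper-case hex):
```go
package main

import (
	"crypto/sha256"
	"fmt"
	"strings"
)

// ibcDenom derives the displayed denomination from a full denom path,
// per ADR 001: SHA-256 over "port/channel/.../baseDenom", upper-case hex.
func ibcDenom(fullPath string) string {
	hash := sha256.Sum256([]byte(fullPath))
	return "ibc/" + strings.ToUpper(fmt.Sprintf("%x", hash[:]))
}

func main() {
	fmt.Println(ibcDenom("transfer/channelToA/uatom"))
	// ibc/7F1D3FCF4AE79E1554D670D1AD949A9BA4E4A3C76C63093E17E446A46061A7A2
}
```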
Each send to any chain other than the one it was previously received from is a movement forwards in
the token's timeline. This causes a trace to be added to the token's history and the destination port
and destination channel to be prefixed to the denomination. In these instances the sender chain is
acting as the "source zone". When the token is sent back to the chain it previously received from, the
prefix is removed. This is a backwards movement in the token's timeline and the sender chain is
acting as the "sink zone".
It is strongly recommended to read the full details of [ADR 001: Coin Source Tracing](/architecture/adr-001-coin-source-tracing) to understand the implications and context of the IBC token representations.
## UX suggestions for clients
For clients (wallets, exchanges, applications, block explorers, etc) that want to display the source of the token, it is recommended to use the following alternatives for each of the cases below:
### Direct connection
If the denomination trace contains a single identifier prefix pair (as in the example above), then
the easiest way to retrieve the chain and light client identifier is to map the trace information
directly. In summary, this requires querying the channel from the denomination trace identifiers,
and then the counterparty client state using the counterparty port and channel identifiers from the
retrieved channel.
A general pseudo algorithm would look like the following:
1. Query the full denomination trace.
2. Query the channel with the `portID/channelID` pair, which corresponds to the first destination of the
token.
3. Query the client state using the identifiers pair. Note that this query will return a `"Not
Found"` response if the current chain is not connected to this channel.
4. Retrieve the client identifier or chain identifier from the client state (eg: on
Tendermint clients) and store it locally.
Using the gRPC gateway client service, the steps above would be as follows for a given IBC token `ibc/7F1D3FCF4AE79E1554D670D1AD949A9BA4E4A3C76C63093E17E446A46061A7A2` stored on `chainB`:
1. `GET /ibc/apps/transfer/v1/denom_traces/7F1D3FCF4AE79E1554D670D1AD949A9BA4E4A3C76C63093E17E446A46061A7A2` -> `{"path": "transfer/channelToA", "base_denom": "uatom"}`
2. `GET /ibc/apps/transfer/v1/channels/channelToA/ports/transfer/client_state` -> `{"client_id": "clientA", "chain-id": "chainA", ...}`
3. `GET /ibc/apps/transfer/v1/channels/channelToA/ports/transfer` -> `{"channel_id": "channelToA", "port_id": "transfer", "counterparty": {"channel_id": "channelToB", "port_id": "transfer"}, ...}`
4. `GET /ibc/apps/transfer/v1/channels/channelToB/ports/transfer/client_state` -> `{"client_id": "clientB", "chain-id": "chainB", ...}`
Then, the token transfer chain path for the `uatom` denomination would be: `chainA` -> `chainB`.
### Multiple hops
The multiple channel hops case applies when the token has passed through multiple chains between the original source and final destination chains.
The IBC protocol doesn't know the topology of the overall network (i.e. connections between chains and the identifiers used between them). For this reason, in the multiple hops case, a particular chain in the timeline of the individual transfers can't query the chain and client identifiers of the other chains.
Take for example the following sequence of transfers `A -> B -> C` for an IBC token, with a final prefix path (trace info) of `transfer/channelChainC/transfer/channelChainB`. What the paragraph above means is that even if chain `C` is directly connected to chain `A`, the port and channel identifiers that chain `B` uses to connect to chain `A` (e.g. `transfer/channelChainA`) can be completely different from the ones that chain `C` uses to connect to chain `A` (e.g. `transfer/channelToChainA`).
Thus, the proposed solutions for clients that the IBC team recommends are the following:
- **Connect to all chains**: Connecting to all the chains in the timeline would allow clients to
perform the queries outlined in the [direct connection](#direct-connection) section to each
relevant chain. By repeatedly following the port and channel denomination trace transfer timeline,
clients should always be able to find all the relevant identifiers. This comes at the tradeoff
that the client must connect to nodes on each of the chains in order to perform the queries.
- **Relayer as a Service (RaaS)**: A longer term solution is to use/create a relayer service that
could map the denomination trace to the chain path timeline for each token (i.e. `origin chain ->
chain #1 -> ... -> chain #(n-1) -> final chain`). These services could provide merkle proofs in
order to allow clients to optionally verify the path timeline correctness for themselves by
running light clients. If the proofs are not verified, the services should be considered trusted
third parties. Additionally, clients would be advised in the future to use RaaS providers that support the
largest number of connections between chains in the ecosystem. Unfortunately, neither of the existing
public relayers (in [Golang](https://github.com/cosmos/relayer) and
[Rust](https://github.com/informalsystems/ibc-rs)) provides this service to clients.
:::tip
The only viable alternative for clients (at the time of writing) for tokens with multiple connection hops is to connect to all chains directly and perform the relevant queries to each of them in sequence.
:::
## Forwarding
:::info
Token forwarding and unwinding is supported only on ICS20 v2 transfer channels.
:::
Forwarding allows tokens to be routed to a final destination through multiple (up to 8) intermediary
chains. With forwarding, it's also possible to unwind IBC vouchers to their native chain, and forward
them afterwards to another destination, all with just a single transfer transaction on the sending chain.
### Forward tokens
Native tokens or IBC vouchers on any chain can be forwarded through intermediary chains to reach their
final destination. For example, given the topology below, with 3 chains and a transfer channel between
chains A and B and between chains B and C:
![Light Mode Forwarding](./images/forwarding-3-chains-light.png#gh-light-mode-only)![Dark Mode Forwarding](./images/forwarding-3-chains-dark.png#gh-dark-mode-only)
Native tokens on chain `A` can be sent to chain `C` through chain `B`. The routing is specified by the
source port ID and channel ID of choice on every intermediary chain. In this example, there is only one
forwarding hop on chain `B` and the port ID, channel ID pair is `transfer`, `channelBToC`. Forwarding of
multi-denom collections of tokens (i.e. tokens of different denominations) is also allowed.
### Unwind tokens
Taking again as an example the topology from the previous section, we assume that native tokens on chain `A`
have been transferred to chain `C`. The IBC vouchers on chain `C` have the denomination trace
`transfer/channelCtoB/transfer/channelBtoA`, and with forwarding it is possible to submit a transfer message
on chain `C` and automatically unwind the vouchers through chain `B` to chain `A`, so that the tokens recovered
on the origin chain regain their native denomination. To execute automatic unwinding, the transfer
module requires no extra user input: the unwind route is encoded in the denomination trace, via the
pairs of destination port ID and channel ID that are added on every chain where the tokens are received.
Please note that unwinding of vouchers is only allowed when all the transferred vouchers share the same denomination
trace (signifying coins that all originate from the same source). It is not possible to unwind vouchers of two different
IBC denominations, since they come from different source chains.
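Since the unwind route is simply the trace read front to back, here is an illustrative sketch of splitting a trace into (port, channel) hops; the helper name is ours, not the module's API:
```go
package main

import (
	"fmt"
	"strings"
)

// traceHops splits a denomination trace path such as
// "transfer/channelCtoB/transfer/channelBtoA" into (port, channel)
// pairs, ordered from the most recent hop to the oldest.
func traceHops(tracePath string) [][2]string {
	parts := strings.Split(tracePath, "/")
	hops := make([][2]string, 0, len(parts)/2)
	for i := 0; i+1 < len(parts); i += 2 {
		hops = append(hops, [2]string{parts[i], parts[i+1]})
	}
	return hops
}

func main() {
	fmt.Println(traceHops("transfer/channelCtoB/transfer/channelBtoA"))
	// [[transfer channelCtoB] [transfer channelBtoA]]
}
```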
### Unwind tokens and then forward
Unwinding and forwarding can be used in combination, so that vouchers are first unwound to their origin chain
and then forwarded to a final destination. The same restriction as in the unwinding case applies: only vouchers
of a single IBC denomination can be used.
## Locked funds
In some [exceptional cases](/architecture/adr-026-ibc-client-recovery-mechanisms#exceptional-cases), a client state associated with a given channel cannot be updated. As a result, funds from fungible tokens in that channel are permanently locked and can no longer be transferred.
To mitigate this, a client update governance proposal can be submitted to update the frozen client
with a new valid header. Once the proposal passes the client state will be unfrozen and the funds
from the associated channels will then be unlocked. This mechanism only applies to clients that
allow updates via governance, such as Tendermint clients.
In addition to this, it's important to mention that a token must be sent back along the exact route
that it took originally in order to return it to its original form on the source chain (e.g. the
Cosmos Hub for the `uatom`). Sending a token back to the same chain across a different channel will
**not** move the token back across its timeline. If a channel in the chain history closes before the
token can be sent back across that channel, then the token will not be returnable to its original
form.
## Security considerations
For safety, no other module must be capable of minting tokens with the `ibc/` prefix. The IBC
transfer module needs a subset of the denomination space that only it can create tokens in.
## Channel Closure
The IBC transfer module does not support channel closure.

View File

@ -1,55 +0,0 @@
site_name: Sonr Docs
site_description: Sonr is a decentralized identity network built on the Cosmos SDK. It has early origins as a peer-to-peer file sharing network, but has since evolved into a platform for decentralized authentication and authorization. The early lessons learned from our file sharing roots serve as our philosophy for building the Sonr Blockchain.
site_url: https://onsonr.dev
theme:
name: material
features:
- announce.dismiss
- content.action.edit
- content.action.view
- content.code.annotate
- content.code.copy
- content.code.select
# - content.footnote.tooltips
# - content.tabs.link
- content.tooltips
- header.autohide
# - navigation.expand
- navigation.footer
- navigation.indexes
- navigation.instant
- navigation.instant.prefetch
# - navigation.instant.progress
# - navigation.prune
- navigation.sections
- navigation.tabs
- navigation.tabs.sticky
- navigation.top
- navigation.tracking
- search.highlight
- search.share
- search.suggest
- toc.follow
- toc.integrate
palette:
- media: "(prefers-color-scheme)"
toggle:
icon: material/link
name: Switch to light mode
- media: "(prefers-color-scheme: light)"
scheme: default
primary: cyan
accent: cyan
toggle:
icon: material/moon-waning-crescent
name: Switch to dark mode
- media: "(prefers-color-scheme: dark)"
scheme: slate
primary: black
accent: cyan
toggle:
icon: material/sun
name: Switch to system preference
font:
text: Geist
code: Geist Mono

View File

@ -8,92 +8,61 @@ import (
"github.com/jackc/pgx/v5/pgtype"
)
type Account struct {
ID string
CreatedAt pgtype.Timestamptz
UpdatedAt pgtype.Timestamptz
DeletedAt pgtype.Timestamptz
Number int64
Sequence int32
Address string
PublicKey string
ChainID string
Controller string
IsSubsidiary bool
IsValidator bool
IsDelegator bool
IsAccountable bool
}
type Asset struct {
ID string
CreatedAt pgtype.Timestamptz
UpdatedAt pgtype.Timestamptz
DeletedAt pgtype.Timestamptz
Name string
Symbol string
Decimals int32
ChainID string
Channel string
AssetType string
CoingeckoID pgtype.Text
}
type Credential struct {
ID string
CreatedAt pgtype.Timestamptz
UpdatedAt pgtype.Timestamptz
DeletedAt pgtype.Timestamptz
Handle string
CredentialID string
AuthenticatorAttachment string
Origin string
Type string
Transports string
ID string `json:"id"`
CreatedAt pgtype.Timestamptz `json:"created_at"`
UpdatedAt pgtype.Timestamptz `json:"updated_at"`
DeletedAt pgtype.Timestamptz `json:"deleted_at"`
Handle string `json:"handle"`
CredentialID string `json:"credential_id"`
AuthenticatorAttachment string `json:"authenticator_attachment"`
Origin string `json:"origin"`
Type string `json:"type"`
Transports string `json:"transports"`
}
type Profile struct {
ID string
CreatedAt pgtype.Timestamptz
UpdatedAt pgtype.Timestamptz
DeletedAt pgtype.Timestamptz
Address string
Handle string
Origin string
Name string
Status string
ID string `json:"id"`
CreatedAt pgtype.Timestamptz `json:"created_at"`
UpdatedAt pgtype.Timestamptz `json:"updated_at"`
DeletedAt pgtype.Timestamptz `json:"deleted_at"`
Address string `json:"address"`
Handle string `json:"handle"`
Origin string `json:"origin"`
Name string `json:"name"`
Status string `json:"status"`
}
type Session struct {
ID string
CreatedAt pgtype.Timestamptz
UpdatedAt pgtype.Timestamptz
DeletedAt pgtype.Timestamptz
BrowserName string
BrowserVersion string
ClientIpaddr string
Platform string
IsDesktop bool
IsMobile bool
IsTablet bool
IsTv bool
IsBot bool
Challenge string
IsHumanFirst bool
IsHumanLast bool
ProfileID string
ID string `json:"id"`
CreatedAt pgtype.Timestamptz `json:"created_at"`
UpdatedAt pgtype.Timestamptz `json:"updated_at"`
DeletedAt pgtype.Timestamptz `json:"deleted_at"`
BrowserName string `json:"browser_name"`
BrowserVersion string `json:"browser_version"`
ClientIpaddr string `json:"client_ipaddr"`
Platform string `json:"platform"`
IsDesktop bool `json:"is_desktop"`
IsMobile bool `json:"is_mobile"`
IsTablet bool `json:"is_tablet"`
IsTv bool `json:"is_tv"`
IsBot bool `json:"is_bot"`
Challenge string `json:"challenge"`
IsHumanFirst bool `json:"is_human_first"`
IsHumanLast bool `json:"is_human_last"`
ProfileID string `json:"profile_id"`
}
type Vault struct {
ID int64
CreatedAt pgtype.Timestamptz
UpdatedAt pgtype.Timestamptz
DeletedAt pgtype.Timestamptz
Handle string
Origin string
Address string
Cid string
Config []byte
SessionID int64
RedirectUri string
ID int64 `json:"id"`
CreatedAt pgtype.Timestamptz `json:"created_at"`
UpdatedAt pgtype.Timestamptz `json:"updated_at"`
DeletedAt pgtype.Timestamptz `json:"deleted_at"`
Handle string `json:"handle"`
Origin string `json:"origin"`
Address string `json:"address"`
Cid string `json:"cid"`
Config []byte `json:"config"`
SessionID int64 `json:"session_id"`
RedirectUri string `json:"redirect_uri"`
}

View File

@ -0,0 +1,34 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.27.0
package hwayorm
import (
"context"
)
type Querier interface {
CheckHandleExists(ctx context.Context, handle string) (bool, error)
CreateSession(ctx context.Context, arg CreateSessionParams) (*Session, error)
GetChallengeBySessionID(ctx context.Context, id string) (string, error)
GetCredentialByID(ctx context.Context, credentialID string) (*Credential, error)
GetCredentialsByHandle(ctx context.Context, handle string) ([]*Credential, error)
GetHumanVerificationNumbers(ctx context.Context, id string) (*GetHumanVerificationNumbersRow, error)
GetProfileByAddress(ctx context.Context, address string) (*Profile, error)
GetProfileByHandle(ctx context.Context, handle string) (*Profile, error)
GetProfileByID(ctx context.Context, id string) (*Profile, error)
GetSessionByClientIP(ctx context.Context, clientIpaddr string) (*Session, error)
GetSessionByID(ctx context.Context, id string) (*Session, error)
GetVaultConfigByCID(ctx context.Context, cid string) (*Vault, error)
GetVaultRedirectURIBySessionID(ctx context.Context, sessionID int64) (string, error)
InsertCredential(ctx context.Context, arg InsertCredentialParams) (*Credential, error)
InsertProfile(ctx context.Context, arg InsertProfileParams) (*Profile, error)
SoftDeleteCredential(ctx context.Context, credentialID string) error
SoftDeleteProfile(ctx context.Context, address string) error
UpdateProfile(ctx context.Context, arg UpdateProfileParams) (*Profile, error)
UpdateSessionHumanVerification(ctx context.Context, arg UpdateSessionHumanVerificationParams) (*Session, error)
UpdateSessionWithProfileID(ctx context.Context, arg UpdateSessionWithProfileIDParams) (*Session, error)
}
var _ Querier = (*Queries)(nil)

View File

@ -1,7 +1,7 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.27.0
// source: query_highway.sql
// source: query.sql
package hwayorm
@ -43,23 +43,23 @@ RETURNING id, created_at, updated_at, deleted_at, browser_name, browser_version,
`
type CreateSessionParams struct {
ID string
BrowserName string
BrowserVersion string
ClientIpaddr string
Platform string
IsDesktop bool
IsMobile bool
IsTablet bool
IsTv bool
IsBot bool
Challenge string
IsHumanFirst bool
IsHumanLast bool
ProfileID string
ID string `json:"id"`
BrowserName string `json:"browser_name"`
BrowserVersion string `json:"browser_version"`
ClientIpaddr string `json:"client_ipaddr"`
Platform string `json:"platform"`
IsDesktop bool `json:"is_desktop"`
IsMobile bool `json:"is_mobile"`
IsTablet bool `json:"is_tablet"`
IsTv bool `json:"is_tv"`
IsBot bool `json:"is_bot"`
Challenge string `json:"challenge"`
IsHumanFirst bool `json:"is_human_first"`
IsHumanLast bool `json:"is_human_last"`
ProfileID string `json:"profile_id"`
}
func (q *Queries) CreateSession(ctx context.Context, arg CreateSessionParams) (Session, error) {
func (q *Queries) CreateSession(ctx context.Context, arg CreateSessionParams) (*Session, error) {
row := q.db.QueryRow(ctx, createSession,
arg.ID,
arg.BrowserName,
@ -96,7 +96,7 @@ func (q *Queries) CreateSession(ctx context.Context, arg CreateSessionParams) (S
&i.IsHumanLast,
&i.ProfileID,
)
return i, err
return &i, err
}
const getChallengeBySessionID = `-- name: GetChallengeBySessionID :one
@ -119,7 +119,7 @@ AND deleted_at IS NULL
LIMIT 1
`
func (q *Queries) GetCredentialByID(ctx context.Context, credentialID string) (Credential, error) {
func (q *Queries) GetCredentialByID(ctx context.Context, credentialID string) (*Credential, error) {
row := q.db.QueryRow(ctx, getCredentialByID, credentialID)
var i Credential
err := row.Scan(
@ -134,7 +134,7 @@ func (q *Queries) GetCredentialByID(ctx context.Context, credentialID string) (C
&i.Type,
&i.Transports,
)
return i, err
return &i, err
}
const getCredentialsByHandle = `-- name: GetCredentialsByHandle :many
@ -143,13 +143,13 @@ WHERE handle = $1
AND deleted_at IS NULL
`
func (q *Queries) GetCredentialsByHandle(ctx context.Context, handle string) ([]Credential, error) {
func (q *Queries) GetCredentialsByHandle(ctx context.Context, handle string) ([]*Credential, error) {
rows, err := q.db.Query(ctx, getCredentialsByHandle, handle)
if err != nil {
return nil, err
}
defer rows.Close()
var items []Credential
var items []*Credential
for rows.Next() {
var i Credential
if err := rows.Scan(
@ -166,7 +166,7 @@ func (q *Queries) GetCredentialsByHandle(ctx context.Context, handle string) ([]
); err != nil {
return nil, err
}
items = append(items, i)
items = append(items, &i)
}
if err := rows.Err(); err != nil {
return nil, err
@ -181,15 +181,15 @@ LIMIT 1
`
type GetHumanVerificationNumbersRow struct {
IsHumanFirst bool
IsHumanLast bool
IsHumanFirst bool `json:"is_human_first"`
IsHumanLast bool `json:"is_human_last"`
}
func (q *Queries) GetHumanVerificationNumbers(ctx context.Context, id string) (GetHumanVerificationNumbersRow, error) {
func (q *Queries) GetHumanVerificationNumbers(ctx context.Context, id string) (*GetHumanVerificationNumbersRow, error) {
row := q.db.QueryRow(ctx, getHumanVerificationNumbers, id)
var i GetHumanVerificationNumbersRow
err := row.Scan(&i.IsHumanFirst, &i.IsHumanLast)
return i, err
return &i, err
}
const getProfileByAddress = `-- name: GetProfileByAddress :one
@ -198,7 +198,7 @@ WHERE address = $1 AND deleted_at IS NULL
LIMIT 1
`
func (q *Queries) GetProfileByAddress(ctx context.Context, address string) (Profile, error) {
func (q *Queries) GetProfileByAddress(ctx context.Context, address string) (*Profile, error) {
row := q.db.QueryRow(ctx, getProfileByAddress, address)
var i Profile
err := row.Scan(
@ -212,7 +212,7 @@ func (q *Queries) GetProfileByAddress(ctx context.Context, address string) (Prof
&i.Name,
&i.Status,
)
return i, err
return &i, err
}
const getProfileByHandle = `-- name: GetProfileByHandle :one
@ -222,7 +222,7 @@ AND deleted_at IS NULL
LIMIT 1
`
func (q *Queries) GetProfileByHandle(ctx context.Context, handle string) (Profile, error) {
func (q *Queries) GetProfileByHandle(ctx context.Context, handle string) (*Profile, error) {
row := q.db.QueryRow(ctx, getProfileByHandle, handle)
var i Profile
err := row.Scan(
@ -236,7 +236,7 @@ func (q *Queries) GetProfileByHandle(ctx context.Context, handle string) (Profil
&i.Name,
&i.Status,
)
return i, err
return &i, err
}
const getProfileByID = `-- name: GetProfileByID :one
@ -245,7 +245,7 @@ WHERE id = $1 AND deleted_at IS NULL
LIMIT 1
`
func (q *Queries) GetProfileByID(ctx context.Context, id string) (Profile, error) {
func (q *Queries) GetProfileByID(ctx context.Context, id string) (*Profile, error) {
row := q.db.QueryRow(ctx, getProfileByID, id)
var i Profile
err := row.Scan(
@ -259,7 +259,7 @@ func (q *Queries) GetProfileByID(ctx context.Context, id string) (Profile, error
&i.Name,
&i.Status,
)
return i, err
return &i, err
}
const getSessionByClientIP = `-- name: GetSessionByClientIP :one
@ -268,7 +268,7 @@ WHERE client_ipaddr = $1 AND deleted_at IS NULL
LIMIT 1
`
func (q *Queries) GetSessionByClientIP(ctx context.Context, clientIpaddr string) (Session, error) {
func (q *Queries) GetSessionByClientIP(ctx context.Context, clientIpaddr string) (*Session, error) {
row := q.db.QueryRow(ctx, getSessionByClientIP, clientIpaddr)
var i Session
err := row.Scan(
@ -290,7 +290,7 @@ func (q *Queries) GetSessionByClientIP(ctx context.Context, clientIpaddr string)
&i.IsHumanLast,
&i.ProfileID,
)
return i, err
return &i, err
}
const getSessionByID = `-- name: GetSessionByID :one
@ -299,7 +299,7 @@ WHERE id = $1 AND deleted_at IS NULL
LIMIT 1
`
func (q *Queries) GetSessionByID(ctx context.Context, id string) (Session, error) {
func (q *Queries) GetSessionByID(ctx context.Context, id string) (*Session, error) {
row := q.db.QueryRow(ctx, getSessionByID, id)
var i Session
err := row.Scan(
@ -321,7 +321,7 @@ func (q *Queries) GetSessionByID(ctx context.Context, id string) (Session, error
&i.IsHumanLast,
&i.ProfileID,
)
return i, err
return &i, err
}
const getVaultConfigByCID = `-- name: GetVaultConfigByCID :one
@ -331,7 +331,7 @@ AND deleted_at IS NULL
LIMIT 1
`
func (q *Queries) GetVaultConfigByCID(ctx context.Context, cid string) (Vault, error) {
func (q *Queries) GetVaultConfigByCID(ctx context.Context, cid string) (*Vault, error) {
row := q.db.QueryRow(ctx, getVaultConfigByCID, cid)
var i Vault
err := row.Scan(
@ -347,7 +347,7 @@ func (q *Queries) GetVaultConfigByCID(ctx context.Context, cid string) (Vault, e
&i.SessionID,
&i.RedirectUri,
)
return i, err
return &i, err
}
const getVaultRedirectURIBySessionID = `-- name: GetVaultRedirectURIBySessionID :one
@ -376,14 +376,14 @@ RETURNING id, created_at, updated_at, deleted_at, handle, credential_id, authent
`
type InsertCredentialParams struct {
Handle string
CredentialID string
Origin string
Type string
Transports string
Handle string `json:"handle"`
CredentialID string `json:"credential_id"`
Origin string `json:"origin"`
Type string `json:"type"`
Transports string `json:"transports"`
}
func (q *Queries) InsertCredential(ctx context.Context, arg InsertCredentialParams) (Credential, error) {
func (q *Queries) InsertCredential(ctx context.Context, arg InsertCredentialParams) (*Credential, error) {
row := q.db.QueryRow(ctx, insertCredential,
arg.Handle,
arg.CredentialID,
@ -404,7 +404,7 @@ func (q *Queries) InsertCredential(ctx context.Context, arg InsertCredentialPara
&i.Type,
&i.Transports,
)
return i, err
return &i, err
}
const insertProfile = `-- name: InsertProfile :one
@ -418,13 +418,13 @@ RETURNING id, created_at, updated_at, deleted_at, address, handle, origin, name,
`
type InsertProfileParams struct {
Address string
Handle string
Origin string
Name string
Address string `json:"address"`
Handle string `json:"handle"`
Origin string `json:"origin"`
Name string `json:"name"`
}
func (q *Queries) InsertProfile(ctx context.Context, arg InsertProfileParams) (Profile, error) {
func (q *Queries) InsertProfile(ctx context.Context, arg InsertProfileParams) (*Profile, error) {
row := q.db.QueryRow(ctx, insertProfile,
arg.Address,
arg.Handle,
@ -443,7 +443,7 @@ func (q *Queries) InsertProfile(ctx context.Context, arg InsertProfileParams) (P
&i.Name,
&i.Status,
)
return i, err
return &i, err
}
const softDeleteCredential = `-- name: SoftDeleteCredential :exec
@ -480,12 +480,12 @@ RETURNING id, created_at, updated_at, deleted_at, address, handle, origin, name,
`
type UpdateProfileParams struct {
Name string
Handle string
Address string
Name string `json:"name"`
Handle string `json:"handle"`
Address string `json:"address"`
}
func (q *Queries) UpdateProfile(ctx context.Context, arg UpdateProfileParams) (Profile, error) {
func (q *Queries) UpdateProfile(ctx context.Context, arg UpdateProfileParams) (*Profile, error) {
row := q.db.QueryRow(ctx, updateProfile, arg.Name, arg.Handle, arg.Address)
var i Profile
err := row.Scan(
@ -499,7 +499,7 @@ func (q *Queries) UpdateProfile(ctx context.Context, arg UpdateProfileParams) (P
&i.Name,
&i.Status,
)
return i, err
return &i, err
}
const updateSessionHumanVerification = `-- name: UpdateSessionHumanVerification :one
@ -513,12 +513,12 @@ RETURNING id, created_at, updated_at, deleted_at, browser_name, browser_version,
`
type UpdateSessionHumanVerificationParams struct {
IsHumanFirst bool
IsHumanLast bool
ID string
IsHumanFirst bool `json:"is_human_first"`
IsHumanLast bool `json:"is_human_last"`
ID string `json:"id"`
}
func (q *Queries) UpdateSessionHumanVerification(ctx context.Context, arg UpdateSessionHumanVerificationParams) (Session, error) {
func (q *Queries) UpdateSessionHumanVerification(ctx context.Context, arg UpdateSessionHumanVerificationParams) (*Session, error) {
row := q.db.QueryRow(ctx, updateSessionHumanVerification, arg.IsHumanFirst, arg.IsHumanLast, arg.ID)
var i Session
err := row.Scan(
@ -540,7 +540,7 @@ func (q *Queries) UpdateSessionHumanVerification(ctx context.Context, arg Update
&i.IsHumanLast,
&i.ProfileID,
)
return i, err
return &i, err
}
const updateSessionWithProfileID = `-- name: UpdateSessionWithProfileID :one
@ -553,11 +553,11 @@ RETURNING id, created_at, updated_at, deleted_at, browser_name, browser_version,
`
type UpdateSessionWithProfileIDParams struct {
ProfileID string
ID string
ProfileID string `json:"profile_id"`
ID string `json:"id"`
}
func (q *Queries) UpdateSessionWithProfileID(ctx context.Context, arg UpdateSessionWithProfileIDParams) (Session, error) {
func (q *Queries) UpdateSessionWithProfileID(ctx context.Context, arg UpdateSessionWithProfileIDParams) (*Session, error) {
row := q.db.QueryRow(ctx, updateSessionWithProfileID, arg.ProfileID, arg.ID)
var i Session
err := row.Scan(
@ -579,5 +579,5 @@ func (q *Queries) UpdateSessionWithProfileID(ctx context.Context, arg UpdateSess
&i.IsHumanLast,
&i.ProfileID,
)
return i, err
return &i, err
}

View File

@ -10,90 +10,90 @@ import (
)
type Account struct {
ID string
CreatedAt time.Time
UpdatedAt time.Time
DeletedAt sql.NullTime
Number int64
Sequence int64
Address string
PublicKey string
ChainID string
Controller string
IsSubsidiary bool
IsValidator bool
IsDelegator bool
IsAccountable bool
ID string `json:"id"`
CreatedAt time.Time `json:"created_at"`
UpdatedAt time.Time `json:"updated_at"`
DeletedAt sql.NullTime `json:"deleted_at"`
Number int64 `json:"number"`
Sequence int64 `json:"sequence"`
Address string `json:"address"`
PublicKey string `json:"public_key"`
ChainID string `json:"chain_id"`
Controller string `json:"controller"`
IsSubsidiary bool `json:"is_subsidiary"`
IsValidator bool `json:"is_validator"`
IsDelegator bool `json:"is_delegator"`
IsAccountable bool `json:"is_accountable"`
}
type Asset struct {
ID string
CreatedAt time.Time
UpdatedAt time.Time
DeletedAt sql.NullTime
Name string
Symbol string
Decimals int64
ChainID string
Channel string
AssetType string
CoingeckoID sql.NullString
ID string `json:"id"`
CreatedAt time.Time `json:"created_at"`
UpdatedAt time.Time `json:"updated_at"`
DeletedAt sql.NullTime `json:"deleted_at"`
Name string `json:"name"`
Symbol string `json:"symbol"`
Decimals int64 `json:"decimals"`
ChainID string `json:"chain_id"`
Channel string `json:"channel"`
AssetType string `json:"asset_type"`
CoingeckoID sql.NullString `json:"coingecko_id"`
}
type Credential struct {
ID string
CreatedAt time.Time
UpdatedAt time.Time
DeletedAt sql.NullTime
Handle string
CredentialID string
AuthenticatorAttachment string
Origin string
Type string
Transports string
ID string `json:"id"`
CreatedAt time.Time `json:"created_at"`
UpdatedAt time.Time `json:"updated_at"`
DeletedAt sql.NullTime `json:"deleted_at"`
Handle string `json:"handle"`
CredentialID string `json:"credential_id"`
AuthenticatorAttachment string `json:"authenticator_attachment"`
Origin string `json:"origin"`
Type string `json:"type"`
Transports string `json:"transports"`
}
type Profile struct {
ID string
CreatedAt time.Time
UpdatedAt time.Time
DeletedAt sql.NullTime
Address string
Handle string
Origin string
Name string
ID string `json:"id"`
CreatedAt time.Time `json:"created_at"`
UpdatedAt time.Time `json:"updated_at"`
DeletedAt sql.NullTime `json:"deleted_at"`
Address string `json:"address"`
Handle string `json:"handle"`
Origin string `json:"origin"`
Name string `json:"name"`
}
type Session struct {
ID string
CreatedAt time.Time
UpdatedAt time.Time
DeletedAt sql.NullTime
BrowserName string
BrowserVersion string
ClientIpaddr string
Platform string
IsDesktop bool
IsMobile bool
IsTablet bool
IsTv bool
IsBot bool
Challenge string
IsHumanFirst bool
IsHumanLast bool
ProfileID int64
ID string `json:"id"`
CreatedAt time.Time `json:"created_at"`
UpdatedAt time.Time `json:"updated_at"`
DeletedAt sql.NullTime `json:"deleted_at"`
BrowserName string `json:"browser_name"`
BrowserVersion string `json:"browser_version"`
ClientIpaddr string `json:"client_ipaddr"`
Platform string `json:"platform"`
IsDesktop bool `json:"is_desktop"`
IsMobile bool `json:"is_mobile"`
IsTablet bool `json:"is_tablet"`
IsTv bool `json:"is_tv"`
IsBot bool `json:"is_bot"`
Challenge string `json:"challenge"`
IsHumanFirst bool `json:"is_human_first"`
IsHumanLast bool `json:"is_human_last"`
ProfileID int64 `json:"profile_id"`
}
type Vault struct {
ID string
CreatedAt time.Time
UpdatedAt time.Time
DeletedAt sql.NullTime
Handle string
Origin string
Address string
Cid string
Config string
SessionID string
RedirectUri string
ID string `json:"id"`
CreatedAt time.Time `json:"created_at"`
UpdatedAt time.Time `json:"updated_at"`
DeletedAt sql.NullTime `json:"deleted_at"`
Handle string `json:"handle"`
Origin string `json:"origin"`
Address string `json:"address"`
Cid string `json:"cid"`
Config string `json:"config"`
SessionID string `json:"session_id"`
RedirectUri string `json:"redirect_uri"`
}

View File

@ -0,0 +1,34 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.27.0
package motrorm
import (
"context"
)
type Querier interface {
CheckHandleExists(ctx context.Context, handle string) (bool, error)
CreateSession(ctx context.Context, arg CreateSessionParams) (Session, error)
GetChallengeBySessionID(ctx context.Context, id string) (string, error)
GetCredentialByID(ctx context.Context, credentialID string) (Credential, error)
GetCredentialsByHandle(ctx context.Context, handle string) ([]Credential, error)
GetHumanVerificationNumbers(ctx context.Context, id string) (GetHumanVerificationNumbersRow, error)
GetProfileByAddress(ctx context.Context, address string) (Profile, error)
GetProfileByHandle(ctx context.Context, handle string) (Profile, error)
GetProfileByID(ctx context.Context, id string) (Profile, error)
GetSessionByClientIP(ctx context.Context, clientIpaddr string) (Session, error)
GetSessionByID(ctx context.Context, id string) (Session, error)
GetVaultConfigByCID(ctx context.Context, cid string) (Vault, error)
GetVaultRedirectURIBySessionID(ctx context.Context, sessionID string) (string, error)
InsertCredential(ctx context.Context, arg InsertCredentialParams) (Credential, error)
InsertProfile(ctx context.Context, arg InsertProfileParams) (Profile, error)
SoftDeleteCredential(ctx context.Context, credentialID string) error
SoftDeleteProfile(ctx context.Context, address string) error
UpdateProfile(ctx context.Context, arg UpdateProfileParams) (Profile, error)
UpdateSessionHumanVerification(ctx context.Context, arg UpdateSessionHumanVerificationParams) (Session, error)
UpdateSessionWithProfileID(ctx context.Context, arg UpdateSessionWithProfileIDParams) (Session, error)
}
var _ Querier = (*Queries)(nil)

View File

@ -1,7 +1,7 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.27.0
// source: query_vault.sql
// source: query.sql
package motrorm
@ -43,20 +43,20 @@ RETURNING id, created_at, updated_at, deleted_at, browser_name, browser_version,
`
type CreateSessionParams struct {
ID string
BrowserName string
BrowserVersion string
ClientIpaddr string
Platform string
IsDesktop bool
IsMobile bool
IsTablet bool
IsTv bool
IsBot bool
Challenge string
IsHumanFirst bool
IsHumanLast bool
ProfileID int64
ID string `json:"id"`
BrowserName string `json:"browser_name"`
BrowserVersion string `json:"browser_version"`
ClientIpaddr string `json:"client_ipaddr"`
Platform string `json:"platform"`
IsDesktop bool `json:"is_desktop"`
IsMobile bool `json:"is_mobile"`
IsTablet bool `json:"is_tablet"`
IsTv bool `json:"is_tv"`
IsBot bool `json:"is_bot"`
Challenge string `json:"challenge"`
IsHumanFirst bool `json:"is_human_first"`
IsHumanLast bool `json:"is_human_last"`
ProfileID int64 `json:"profile_id"`
}
func (q *Queries) CreateSession(ctx context.Context, arg CreateSessionParams) (Session, error) {
@ -184,8 +184,8 @@ LIMIT 1
`
type GetHumanVerificationNumbersRow struct {
IsHumanFirst bool
IsHumanLast bool
IsHumanFirst bool `json:"is_human_first"`
IsHumanLast bool `json:"is_human_last"`
}
func (q *Queries) GetHumanVerificationNumbers(ctx context.Context, id string) (GetHumanVerificationNumbersRow, error) {
@ -376,11 +376,11 @@ RETURNING id, created_at, updated_at, deleted_at, handle, credential_id, authent
`
type InsertCredentialParams struct {
Handle string
CredentialID string
Origin string
Type string
Transports string
Handle string `json:"handle"`
CredentialID string `json:"credential_id"`
Origin string `json:"origin"`
Type string `json:"type"`
Transports string `json:"transports"`
}
func (q *Queries) InsertCredential(ctx context.Context, arg InsertCredentialParams) (Credential, error) {
@ -418,10 +418,10 @@ RETURNING id, created_at, updated_at, deleted_at, address, handle, origin, name
`
type InsertProfileParams struct {
Address string
Handle string
Origin string
Name string
Address string `json:"address"`
Handle string `json:"handle"`
Origin string `json:"origin"`
Name string `json:"name"`
}
func (q *Queries) InsertProfile(ctx context.Context, arg InsertProfileParams) (Profile, error) {
@ -479,9 +479,9 @@ RETURNING id, created_at, updated_at, deleted_at, address, handle, origin, name
`
type UpdateProfileParams struct {
Name string
Handle string
Address string
Name string `json:"name"`
Handle string `json:"handle"`
Address string `json:"address"`
}
func (q *Queries) UpdateProfile(ctx context.Context, arg UpdateProfileParams) (Profile, error) {
@ -511,9 +511,9 @@ RETURNING id, created_at, updated_at, deleted_at, browser_name, browser_version,
`
type UpdateSessionHumanVerificationParams struct {
IsHumanFirst bool
IsHumanLast bool
ID string
IsHumanFirst bool `json:"is_human_first"`
IsHumanLast bool `json:"is_human_last"`
ID string `json:"id"`
}
func (q *Queries) UpdateSessionHumanVerification(ctx context.Context, arg UpdateSessionHumanVerificationParams) (Session, error) {
@ -551,8 +551,8 @@ RETURNING id, created_at, updated_at, deleted_at, browser_name, browser_version,
`
type UpdateSessionWithProfileIDParams struct {
ProfileID int64
ID string
ProfileID int64 `json:"profile_id"`
ID string `json:"id"`
}
func (q *Queries) UpdateSessionWithProfileID(ctx context.Context, arg UpdateSessionWithProfileIDParams) (Session, error) {

View File

@ -98,8 +98,6 @@ CREATE TABLE vaults (
FOREIGN KEY (session_id) REFERENCES sessions(id)
);
-- Indexes for common queries
CREATE INDEX idx_profiles_handle ON profiles(handle);
CREATE INDEX idx_profiles_address ON profiles(address);

View File

@ -4,5 +4,5 @@ import (
_ "embed"
)
//go:embed schema_vault.sql
//go:embed vault/schema.sql
var SchemaVaultSQL string

View File

@ -1,19 +1,26 @@
version: "2"
sql:
- engine: "sqlite"
queries: "./sink/query_vault.sql"
schema: "./sink/schema_vault.sql"
queries: "./sink/vault/query.sql"
schema: "./sink/vault/schema.sql"
gen:
go:
emit_interface: true
emit_json_tags: true
package: "motrorm"
out: "motrorm"
- engine: "postgresql"
queries: "./sink/query_highway.sql"
schema: "./sink/schema_highway.sql"
queries: "./sink/highway/query.sql"
schema: "./sink/highway/schema.sql"
gen:
go:
emit_all_enum_values: true
emit_enum_valid_method: true
emit_json_tags: true
emit_interface: true
emit_result_struct_pointers: true
omit_unused_structs: true
package: "hwayorm"
out: "hwayorm"
sql_package: "pgx/v5"

View File

@ -12,6 +12,36 @@ message GenesisState {
Params params = 1 [(gogoproto.nullable) = false];
}
// Params defines the set of module parameters.
message Params {
option (amino.name) = "did/params";
option (gogoproto.equal) = true;
option (gogoproto.goproto_stringer) = false;
repeated Attenuation attenuations = 1;
}
// Attenuation defines the attenuation of a resource
message Attenuation {
Resource resource = 1;
repeated Capability capabilities = 2;
}
// Capability represents the available capabilities of a decentralized web node
message Capability {
string name = 1;
string parent = 2;
string description = 3;
repeated string resources = 4;
}
// Resource represents the available resources of a decentralized web node
message Resource {
string kind = 1;
string template = 2;
}
// Document defines a DID document
message Document {
string id = 1;
@ -24,13 +54,3 @@ message Document {
}
// Params defines the set of module parameters.
message Params {
option (amino.name) = "did/params";
option (gogoproto.equal) = true;
option (gogoproto.goproto_stringer) = false;
// Whitelisted Assets
}

View File

@ -18,7 +18,7 @@ service Msg {
// Spawn spawns a new Vault
rpc Spawn(MsgSpawn) returns (MsgSpawnResponse);
rpc Initialize(MsgInitialize) returns (MsgInitializeResponse);
}
// MsgUpdateParams is the Msg/UpdateParams request type.
@ -46,7 +46,7 @@ message MsgUpdateParamsResponse {}
// operation that must be performed interacting with the Vault.
//
// Since: cosmos-sdk 0.47
message MsgSpawn {
message MsgInitialize {
option (cosmos.msg.v1.signer) = "authority";
// authority is the address of the governance account.
@ -62,4 +62,4 @@ message MsgSpawn {
// MsgSpawn message.
//
// Since: cosmos-sdk 0.47
message MsgSpawnResponse {}
message MsgInitializeResponse {}

View File

@ -18,32 +18,37 @@ message Params {
option (gogoproto.equal) = true;
option (gogoproto.goproto_stringer) = false;
ServiceCategories categories = 1;
ServiceTypes types = 2;
repeated Attenuation attenuations = 1;
}
message ServiceCategories {
option (amino.name) = "service/categories";
option (gogoproto.equal) = true;
repeated string categories = 1;
// Attenuation defines the attenuation of a resource
message Attenuation {
Resource resource = 1;
repeated Capability capabilities = 2;
}
message ServiceTypes {
option (amino.name) = "service/types";
option (gogoproto.equal) = true;
// Capability represents the available capabilities of a decentralized web node
message Capability {
string name = 1;
string parent = 2;
string description = 3;
repeated string resources = 4;
}
repeated string types = 1;
// Resource represents the available resources of a decentralized web node
message Resource {
string kind = 1;
string template = 2;
}
// Service defines a Decentralized Service on the Sonr Blockchain
message Service {
string id = 1;
string authority = 2;
string origin = 3;
repeated string origins = 3;
string name = 4;
string description = 5;
string category = 6;
repeated Attenuation attenuations = 6;
repeated string tags = 7;
int64 expiry_height = 8;
}

View File

@ -12,6 +12,16 @@ service Query {
rpc Params(QueryParamsRequest) returns (QueryParamsResponse) {
option (google.api.http).get = "/svc/v1/params";
}
// OriginExists queries if a given origin exists.
rpc OriginExists(QueryOriginExistsRequest) returns (QueryOriginExistsResponse) {
option (google.api.http).get = "/svc/v1/origins/{origin}";
}
// ResolveOrigin queries the domain of a given service and returns its record with capabilities.
rpc ResolveOrigin(QueryResolveOriginRequest) returns (QueryResolveOriginResponse) {
option (google.api.http).get = "/svc/v1/origins/{origin}/record";
}
}
// QueryParamsRequest is the request type for the Query/Params RPC method.
@ -22,3 +32,27 @@ message QueryParamsResponse {
// params defines the parameters of the module.
Params params = 1;
}
// QueryOriginExistsRequest is the request type for the Query/OriginExists RPC method.
message QueryOriginExistsRequest {
// origin is the origin to query.
string origin = 1;
}
// QueryOriginExistsResponse is the response type for the Query/OriginExists RPC method.
message QueryOriginExistsResponse {
// exists is the boolean value representing whether the origin exists.
bool exists = 1;
}
// QueryResolveOriginRequest is the request type for the Query/ResolveOrigin RPC method.
message QueryResolveOriginRequest {
// origin is the origin to query.
string origin = 1;
}
// QueryResolveOriginResponse is the response type for the Query/ResolveOrigin RPC method.
message QueryResolveOriginResponse {
// record is the record of the origin.
Service record = 1;
}
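Assuming the default REST API address (`localhost:1317`), these endpoints could be exercised as, for example:
```bash
# Check whether an origin is registered
curl http://localhost:1317/svc/v1/origins/example.com

# Resolve the origin's service record, including its capabilities
curl http://localhost:1317/svc/v1/origins/example.com/record
```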

View File

@ -1,6 +1,6 @@
# `x/did`
The Decentralized Identity module is responsible for managing native Sonr Accounts, their derived wallets, and associated user identification information.
The Decentralized Identity module is responsible for managing native Sonr Accounts, their derived wallets, and associated user identification information. This module now incorporates UCAN (User Controlled Authorization Networks) for enhanced authorization and access control.
## State
@ -75,13 +75,13 @@ The DID module defines the following messages:
5. MsgUnlinkAuthentication
6. MsgUpdateParams
Each message triggers specific state machine behaviors related to managing DIDs, authentications, assertions, and module parameters.
Each message triggers specific state machine behaviors related to managing DIDs, authentications, assertions, and module parameters. These messages now also involve UCAN authorization checks where applicable.
## Query
The DID module provides the following query endpoints:
1. Params: Query all parameters of the module
1. Params: Query all parameters of the module, including UCAN-related parameters.
2. Resolve: Query the DID document by its ID
3. Sign: Sign a message with the DID document
4. Verify: Verify a message with the DID document
@ -92,18 +92,41 @@ The module parameters include:
- Allowed public keys (map of KeyInfo)
- Conveyance preference
- Attestation formats
- UCAN Authorization Parameters:
- `UcanPermissions`: Specifies the required UCAN permissions for various actions within the module.
## Client
The module provides gRPC and REST endpoints for all defined messages and queries.
## UCAN Authorization
This module utilizes UCAN (User Controlled Authorization Networks) to provide a decentralized and user-centric authorization mechanism. UCANs are self-contained authorization tokens that allow users to delegate specific capabilities to other entities without relying on a central authority.
### UCAN Integration
- The module parameters include a `UcanPermissions` field that defines the default UCAN permissions required for actions within the module.
- Message handlers in the `MsgServer` perform UCAN authorization checks (a sketch follows this list) by:
- Retrieving the UCAN permissions from the context (injected by a middleware).
- Retrieving the required UCAN permissions from the module parameters.
- Verifying that the provided UCAN permissions satisfy the required permissions.
- A dedicated middleware is responsible for:
- Parsing incoming requests for UCAN tokens.
- Verifying UCAN token signatures and validity.
- Extracting UCAN permissions.
- Injecting UCAN permissions into the context.
- UCAN verification logic involves:
- Checking UCAN token signatures against the issuer's public key (resolved via the `x/did` module).
- Validating token expiration and other constraints.
- Parsing token capabilities and extracting relevant permissions.
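A minimal sketch of the handler-side check described above; every name here (`Permissions`, `FromContext`, `Require`) is illustrative, not the module's actual API:
```go
package ucan

import (
	"context"
	"errors"
)

// Permissions is an illustrative capability set that the UCAN middleware
// would inject into the request context after verifying a token.
type Permissions map[string]bool

type ctxKey struct{}

// WithPermissions is what the middleware would call once a token verifies.
func WithPermissions(ctx context.Context, p Permissions) context.Context {
	return context.WithValue(ctx, ctxKey{}, p)
}

// FromContext retrieves the permissions injected by the middleware.
func FromContext(ctx context.Context) (Permissions, bool) {
	p, ok := ctx.Value(ctxKey{}).(Permissions)
	return p, ok
}

// Require verifies that every permission demanded by the module
// parameters is present in the granted set.
func Require(ctx context.Context, required []string) error {
	granted, ok := FromContext(ctx)
	if !ok {
		return errors.New("unauthorized: no UCAN permissions in context")
	}
	for _, perm := range required {
		if !granted[perm] {
			return errors.New("unauthorized: missing UCAN permission " + perm)
		}
	}
	return nil
}
```
A message handler would then call `Require(ctx, requiredPerms)` with the permissions demanded by the module parameters before touching state.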
## Future Improvements
Potential future improvements could include:
1. Enhanced privacy features for DID operations
1. Enhanced privacy features for DID operations, potentially leveraging UCAN capabilities for privacy-preserving authorization.
2. Integration with more blockchain networks
3. Support for additional key types and cryptographic algorithms
4. Improved revocation mechanisms for credentials and assertions
4. Improved revocation mechanisms for credentials, assertions, and UCAN tokens.
## Tests

View File

@ -43,3 +43,64 @@ func (gs GenesisState) Validate() error {
return gs.Params.Validate()
}
// Equal checks if two Attenuation are equal
func (a *Attenuation) Equal(that *Attenuation) bool {
if that == nil {
return false
}
if a.Resource != nil {
if that.Resource == nil {
return false
}
if !a.Resource.Equal(that.Resource) {
return false
}
}
if len(a.Capabilities) != len(that.Capabilities) {
return false
}
for i := range a.Capabilities {
if !a.Capabilities[i].Equal(that.Capabilities[i]) {
return false
}
}
return true
}
// Equal checks if two Capability are equal
func (c *Capability) Equal(that *Capability) bool {
if that == nil {
return false
}
if c.Name != that.Name {
return false
}
if c.Parent != that.Parent {
return false
}
// TODO: check description
if len(c.Resources) != len(that.Resources) {
return false
}
for i := range c.Resources {
if c.Resources[i] != that.Resources[i] {
return false
}
}
return true
}
// Equal checks if two Resource are equal
func (r *Resource) Equal(that *Resource) bool {
if that == nil {
return false
}
if r.Kind != that.Kind {
return false
}
if r.Template != that.Template {
return false
}
return true
}

File diff suppressed because it is too large

View File

@ -1,6 +1,6 @@
# `x/dwn`
The DWN module is responsible for the management of IPFS deployed Decentralized Web Nodes (DWNs) and their associated data.
The DWN module is responsible for the management of IPFS deployed Decentralized Web Nodes (DWNs) and their associated data. This module now incorporates UCAN (User Controlled Authorization Networks) for enhanced authorization and access control.
## Concepts
@ -9,6 +9,7 @@ The DWN module introduces several key concepts:
1. Decentralized Web Node (DWN): A distributed network for storing and sharing data.
2. Schema: A structure defining the format of various data types in the DWN.
3. IPFS Integration: The module can interact with IPFS for decentralized data storage.
4. UCAN Authorization: The module utilizes UCANs for a decentralized and user-centric authorization mechanism.
## State
@ -38,6 +39,7 @@ message Params {
bool ipfs_active = 1;
bool local_registration_enabled = 2;
Schema schema = 4;
repeated string allowed_operators = 5;
}
```
@ -64,15 +66,15 @@ message Schema {
State transitions in the DWN module are primarily triggered by:
1. Updating module parameters
2. Allocating new dwns
3. Syncing DID documents
1. Updating module parameters (including UCAN-related parameters)
2. Allocating new DWNs (with UCAN authorization checks)
3. Syncing DID documents (with UCAN authorization checks)
## Messages
The DWN module defines the following message:
1. `MsgUpdateParams`: Used to update the module parameters.
1. `MsgUpdateParams`: Used to update the module parameters, including UCAN permissions.
```protobuf
message MsgUpdateParams {
@ -89,22 +91,43 @@ No specific begin-block operations are defined for this module.
No specific end-block operations are defined for this module.
## UCAN Authorization
This module utilizes UCAN (User Controlled Authorization Networks) to provide a decentralized and user-centric authorization mechanism. UCANs are self-contained authorization tokens that allow users to delegate specific capabilities to other entities without relying on a central authority.
### UCAN Integration
- The module parameters include a `UcanPermissions` field that defines the default UCAN permissions required for actions within the module, such as allocating new DWNs or syncing DID documents.
- Message handlers in the `MsgServer` perform UCAN authorization checks by:
- Retrieving the UCAN permissions from the context (injected by a middleware).
- Retrieving the required UCAN permissions from the module parameters.
- Verifying that the provided UCAN permissions satisfy the required permissions.
- A dedicated middleware (sketched after this list) is responsible for:
- Parsing incoming requests for UCAN tokens.
- Verifying UCAN token signatures and validity.
- Extracting UCAN permissions.
- Injecting UCAN permissions into the context.
- UCAN verification logic involves:
- Checking UCAN token signatures against the issuer's public key (resolved via the `x/did` module).
- Validating token expiration and other constraints.
- Parsing token capabilities and extracting relevant permissions.
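For the middleware side, a minimal sketch of the flow described above; `verifyUCAN` stands in for the real signature, expiry, and capability checks, and all names are illustrative rather than the module's actual API:
```go
package middleware

import (
	"context"
	"errors"
	"net/http"
	"strings"
)

type permsKey struct{}

// verifyUCAN stands in for real verification: checking the token signature
// against the issuer's public key (resolved via x/did), validating expiry,
// and parsing capabilities into a permission set.
func verifyUCAN(token string) (map[string]bool, error) {
	if token == "" {
		return nil, errors.New("missing UCAN token")
	}
	// ... signature, expiry, and capability checks would go here ...
	return map[string]bool{"dwn/allocate": true}, nil
}

// ucanMiddleware parses the bearer token, verifies it, and injects the
// extracted permissions into the request context for downstream handlers.
func ucanMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		raw := strings.TrimPrefix(r.Header.Get("Authorization"), "Bearer ")
		perms, err := verifyUCAN(raw)
		if err != nil {
			http.Error(w, "invalid UCAN", http.StatusUnauthorized)
			return
		}
		ctx := context.WithValue(r.Context(), permsKey{}, perms)
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}
```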
## Hooks
The DWN module does not define any hooks.
## Events
The DWN module does not explicitly define any events. However, standard Cosmos SDK events may be emitted during state transitions.
The DWN module does not explicitly define any events. However, standard Cosmos SDK events may be emitted during state transitions, including those related to UCAN authorization.
## Client
The DWN module provides the following gRPC query endpoints:
1. `Params`: Queries all parameters of the module.
1. `Params`: Queries all parameters of the module, including UCAN-related parameters.
2. `Schema`: Queries the DID document schema.
3. `Allocate`: Initializes a Target DWN available for claims.
4. `Sync`: Queries the DID document by its ID and returns required information.
3. `Allocate`: Initializes a Target DWN available for claims (subject to UCAN authorization).
4. `Sync`: Queries the DID document by its ID and returns required information (subject to UCAN authorization).
## Params
@ -113,6 +136,7 @@ The module parameters include:
- `ipfs_active` (bool): Indicates if IPFS integration is active.
- `local_registration_enabled` (bool): Indicates if local registration is enabled.
- `schema` (Schema): Defines the structure for various data types in the DWN.
- `UcanPermissions`: Specifies the required UCAN permissions for various actions within the module.
## Future Improvements

View File

@ -34,10 +34,3 @@ func (ms msgServer) Initialize(ctx context.Context, msg *types.MsgInitialize) (*
panic("Initialize is unimplemented")
return &types.MsgInitializeResponse{}, nil
}
// Spawn implements types.MsgServer.
func (ms msgServer) Spawn(ctx context.Context, msg *types.MsgSpawn) (*types.MsgSpawnResponse, error) {
// ctx := sdk.UnwrapSDKContext(goCtx)
panic("Spawn is unimplemented")
return &types.MsgSpawnResponse{}, nil
}

View File

@ -133,7 +133,7 @@ var xxx_messageInfo_MsgUpdateParamsResponse proto.InternalMessageInfo
// operation that must be performed interacting with the Vault.
//
// Since: cosmos-sdk 0.47
type MsgSpawn struct {
type MsgInitialize struct {
// authority is the address of the governance account.
Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty"`
// params defines the parameters to update.
@ -142,18 +142,18 @@ type MsgSpawn struct {
Params Params `protobuf:"bytes,2,opt,name=params,proto3" json:"params"`
}
func (m *MsgSpawn) Reset() { *m = MsgSpawn{} }
func (m *MsgSpawn) String() string { return proto.CompactTextString(m) }
func (*MsgSpawn) ProtoMessage() {}
func (*MsgSpawn) Descriptor() ([]byte, []int) {
func (m *MsgInitialize) Reset() { *m = MsgInitialize{} }
func (m *MsgInitialize) String() string { return proto.CompactTextString(m) }
func (*MsgInitialize) ProtoMessage() {}
func (*MsgInitialize) Descriptor() ([]byte, []int) {
return fileDescriptor_32d2464465560de7, []int{2}
}
func (m *MsgSpawn) XXX_Unmarshal(b []byte) error {
func (m *MsgInitialize) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *MsgSpawn) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
func (m *MsgInitialize) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_MsgSpawn.Marshal(b, m, deterministic)
return xxx_messageInfo_MsgInitialize.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
@ -163,26 +163,26 @@ func (m *MsgSpawn) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return b[:n], nil
}
}
func (m *MsgSpawn) XXX_Merge(src proto.Message) {
xxx_messageInfo_MsgSpawn.Merge(m, src)
func (m *MsgInitialize) XXX_Merge(src proto.Message) {
xxx_messageInfo_MsgInitialize.Merge(m, src)
}
func (m *MsgSpawn) XXX_Size() int {
func (m *MsgInitialize) XXX_Size() int {
return m.Size()
}
func (m *MsgSpawn) XXX_DiscardUnknown() {
xxx_messageInfo_MsgSpawn.DiscardUnknown(m)
func (m *MsgInitialize) XXX_DiscardUnknown() {
xxx_messageInfo_MsgInitialize.DiscardUnknown(m)
}
var xxx_messageInfo_MsgSpawn proto.InternalMessageInfo
var xxx_messageInfo_MsgInitialize proto.InternalMessageInfo
func (m *MsgSpawn) GetAuthority() string {
func (m *MsgInitialize) GetAuthority() string {
if m != nil {
return m.Authority
}
return ""
}
func (m *MsgSpawn) GetParams() Params {
func (m *MsgInitialize) GetParams() Params {
if m != nil {
return m.Params
}
@ -193,21 +193,21 @@ func (m *MsgSpawn) GetParams() Params {
// MsgInitialize message.
//
// Since: cosmos-sdk 0.47
type MsgSpawnResponse struct {
type MsgInitializeResponse struct {
}
func (m *MsgSpawnResponse) Reset() { *m = MsgSpawnResponse{} }
func (m *MsgSpawnResponse) String() string { return proto.CompactTextString(m) }
func (*MsgSpawnResponse) ProtoMessage() {}
func (*MsgSpawnResponse) Descriptor() ([]byte, []int) {
func (m *MsgInitializeResponse) Reset() { *m = MsgInitializeResponse{} }
func (m *MsgInitializeResponse) String() string { return proto.CompactTextString(m) }
func (*MsgInitializeResponse) ProtoMessage() {}
func (*MsgInitializeResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_32d2464465560de7, []int{3}
}
func (m *MsgSpawnResponse) XXX_Unmarshal(b []byte) error {
func (m *MsgInitializeResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *MsgSpawnResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
func (m *MsgInitializeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_MsgSpawnResponse.Marshal(b, m, deterministic)
return xxx_messageInfo_MsgInitializeResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
@ -217,29 +217,29 @@ func (m *MsgSpawnResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, er
return b[:n], nil
}
}
func (m *MsgSpawnResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_MsgSpawnResponse.Merge(m, src)
func (m *MsgInitializeResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_MsgInitializeResponse.Merge(m, src)
}
func (m *MsgSpawnResponse) XXX_Size() int {
func (m *MsgInitializeResponse) XXX_Size() int {
return m.Size()
}
func (m *MsgSpawnResponse) XXX_DiscardUnknown() {
xxx_messageInfo_MsgSpawnResponse.DiscardUnknown(m)
func (m *MsgInitializeResponse) XXX_DiscardUnknown() {
xxx_messageInfo_MsgInitializeResponse.DiscardUnknown(m)
}
var xxx_messageInfo_MsgSpawnResponse proto.InternalMessageInfo
var xxx_messageInfo_MsgInitializeResponse proto.InternalMessageInfo
func init() {
proto.RegisterType((*MsgUpdateParams)(nil), "dwn.v1.MsgUpdateParams")
proto.RegisterType((*MsgUpdateParamsResponse)(nil), "dwn.v1.MsgUpdateParamsResponse")
proto.RegisterType((*MsgSpawn)(nil), "dwn.v1.MsgSpawn")
proto.RegisterType((*MsgSpawnResponse)(nil), "dwn.v1.MsgSpawnResponse")
proto.RegisterType((*MsgInitialize)(nil), "dwn.v1.MsgInitialize")
proto.RegisterType((*MsgInitializeResponse)(nil), "dwn.v1.MsgInitializeResponse")
}
func init() { proto.RegisterFile("dwn/v1/tx.proto", fileDescriptor_32d2464465560de7) }
var fileDescriptor_32d2464465560de7 = []byte{
// 356 bytes of a gzipped FileDescriptorProto
// 361 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4f, 0x29, 0xcf, 0xd3,
0x2f, 0x33, 0xd4, 0x2f, 0xa9, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x4b, 0x29, 0xcf,
0xd3, 0x2b, 0x33, 0x94, 0x12, 0x4f, 0xce, 0x2f, 0xce, 0xcd, 0x2f, 0xd6, 0xcf, 0x2d, 0x4e, 0x07,
@ -252,17 +252,17 @@ var fileDescriptor_32d2464465560de7 = []byte{
0xca, 0xcc, 0x4b, 0x0f, 0x42, 0x28, 0x15, 0xd2, 0xe1, 0x62, 0x2b, 0x00, 0x9b, 0x20, 0xc1, 0xa4,
0xc0, 0xa8, 0xc1, 0x6d, 0xc4, 0xa7, 0x07, 0xf1, 0x84, 0x1e, 0xc4, 0x5c, 0x27, 0x96, 0x13, 0xf7,
0xe4, 0x19, 0x82, 0xa0, 0x6a, 0xac, 0xf8, 0x9a, 0x9e, 0x6f, 0xd0, 0x42, 0xe8, 0x56, 0x92, 0xe4,
0x12, 0x47, 0x73, 0x48, 0x50, 0x6a, 0x71, 0x41, 0x7e, 0x5e, 0x71, 0xaa, 0x52, 0x03, 0x23, 0x17,
0x87, 0x6f, 0x71, 0x7a, 0x70, 0x41, 0x62, 0x79, 0xde, 0x00, 0xb9, 0x4e, 0x88, 0x4b, 0x00, 0xe6,
0x02, 0x98, 0xb3, 0x8c, 0xba, 0x19, 0xb9, 0x98, 0x7d, 0x8b, 0xd3, 0x85, 0x3c, 0xb8, 0x78, 0x50,
0xc2, 0x4f, 0x1c, 0x66, 0x32, 0x9a, 0x7f, 0xa4, 0xe4, 0x71, 0x48, 0xc0, 0x4c, 0x14, 0x32, 0xe6,
0x62, 0x85, 0x78, 0x52, 0x00, 0x49, 0x25, 0x58, 0x44, 0x4a, 0x02, 0x5d, 0x04, 0xa6, 0x49, 0x8a,
0xb5, 0xe1, 0xf9, 0x06, 0x2d, 0x46, 0x27, 0x9b, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63,
0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96,
0x63, 0x88, 0x52, 0x4a, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0xcf, 0xcf,
0x2b, 0xce, 0xcf, 0x2b, 0xd2, 0x07, 0x13, 0x15, 0xfa, 0xa0, 0x34, 0x54, 0x52, 0x59, 0x90, 0x5a,
0x9c, 0xc4, 0x06, 0x4e, 0x0e, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x38, 0x9d, 0x99, 0x0f,
0x89, 0x02, 0x00, 0x00,
0x12, 0x47, 0x73, 0x48, 0x50, 0x6a, 0x71, 0x41, 0x7e, 0x5e, 0x71, 0xaa, 0x52, 0x2b, 0x23, 0x17,
0xaf, 0x6f, 0x71, 0xba, 0x67, 0x5e, 0x66, 0x49, 0x66, 0x62, 0x4e, 0x66, 0x55, 0xea, 0x00, 0x39,
0x51, 0x9c, 0x4b, 0x14, 0xc5, 0x19, 0x30, 0x07, 0x1a, 0xcd, 0x62, 0xe4, 0x62, 0xf6, 0x2d, 0x4e,
0x17, 0xf2, 0xe0, 0xe2, 0x41, 0x09, 0x49, 0x71, 0x98, 0xf1, 0x68, 0x3e, 0x93, 0x92, 0xc7, 0x21,
0x01, 0x33, 0x51, 0xc8, 0x89, 0x8b, 0x0b, 0xc9, 0xbb, 0xa2, 0x48, 0xca, 0x11, 0xc2, 0x52, 0xb2,
0x58, 0x85, 0x61, 0x66, 0x48, 0xb1, 0x36, 0x3c, 0xdf, 0xa0, 0xc5, 0xe8, 0x64, 0x73, 0xe2, 0x91,
0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c, 0xc7, 0x70, 0xe1,
0xb1, 0x1c, 0xc3, 0x8d, 0xc7, 0x72, 0x0c, 0x51, 0x4a, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a,
0xc9, 0xf9, 0xb9, 0xfa, 0xf9, 0x79, 0xc5, 0xf9, 0x79, 0x45, 0xfa, 0x60, 0xa2, 0x42, 0x1f, 0x94,
0xb8, 0x4a, 0x2a, 0x0b, 0x52, 0x8b, 0x93, 0xd8, 0xc0, 0xe9, 0xc4, 0x18, 0x10, 0x00, 0x00, 0xff,
0xff, 0xbb, 0x62, 0x4e, 0x8b, 0xa2, 0x02, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
@ -282,7 +282,7 @@ type MsgClient interface {
// Since: cosmos-sdk 0.47
UpdateParams(ctx context.Context, in *MsgUpdateParams, opts ...grpc.CallOption) (*MsgUpdateParamsResponse, error)
// Initialize initializes a new Vault
Spawn(ctx context.Context, in *MsgSpawn, opts ...grpc.CallOption) (*MsgSpawnResponse, error)
Initialize(ctx context.Context, in *MsgInitialize, opts ...grpc.CallOption) (*MsgInitializeResponse, error)
}
type msgClient struct {
@ -302,9 +302,9 @@ func (c *msgClient) UpdateParams(ctx context.Context, in *MsgUpdateParams, opts
return out, nil
}
func (c *msgClient) Spawn(ctx context.Context, in *MsgSpawn, opts ...grpc.CallOption) (*MsgSpawnResponse, error) {
out := new(MsgSpawnResponse)
err := c.cc.Invoke(ctx, "/dwn.v1.Msg/Spawn", in, out, opts...)
func (c *msgClient) Initialize(ctx context.Context, in *MsgInitialize, opts ...grpc.CallOption) (*MsgInitializeResponse, error) {
out := new(MsgInitializeResponse)
err := c.cc.Invoke(ctx, "/dwn.v1.Msg/Initialize", in, out, opts...)
if err != nil {
return nil, err
}
@ -318,7 +318,7 @@ type MsgServer interface {
// Since: cosmos-sdk 0.47
UpdateParams(context.Context, *MsgUpdateParams) (*MsgUpdateParamsResponse, error)
// Initialize initializes a new Vault
Spawn(context.Context, *MsgSpawn) (*MsgSpawnResponse, error)
Initialize(context.Context, *MsgInitialize) (*MsgInitializeResponse, error)
}
// UnimplementedMsgServer can be embedded to have forward compatible implementations.
@ -328,8 +328,8 @@ type UnimplementedMsgServer struct {
func (*UnimplementedMsgServer) UpdateParams(ctx context.Context, req *MsgUpdateParams) (*MsgUpdateParamsResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method UpdateParams not implemented")
}
func (*UnimplementedMsgServer) Spawn(ctx context.Context, req *MsgSpawn) (*MsgSpawnResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Spawn not implemented")
func (*UnimplementedMsgServer) Initialize(ctx context.Context, req *MsgInitialize) (*MsgInitializeResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Initialize not implemented")
}
func RegisterMsgServer(s grpc1.Server, srv MsgServer) {
@ -354,20 +354,20 @@ func _Msg_UpdateParams_Handler(srv interface{}, ctx context.Context, dec func(in
return interceptor(ctx, in, info, handler)
}
func _Msg_Spawn_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(MsgSpawn)
func _Msg_Initialize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(MsgInitialize)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(MsgServer).Spawn(ctx, in)
return srv.(MsgServer).Initialize(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/dwn.v1.Msg/Spawn",
FullMethod: "/dwn.v1.Msg/Initialize",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(MsgServer).Spawn(ctx, req.(*MsgSpawn))
return srv.(MsgServer).Initialize(ctx, req.(*MsgInitialize))
}
return interceptor(ctx, in, info, handler)
}
@ -382,8 +382,8 @@ var _Msg_serviceDesc = grpc.ServiceDesc{
Handler: _Msg_UpdateParams_Handler,
},
{
MethodName: "Spawn",
Handler: _Msg_Spawn_Handler,
MethodName: "Initialize",
Handler: _Msg_Initialize_Handler,
},
},
Streams: []grpc.StreamDesc{},
@ -453,7 +453,7 @@ func (m *MsgUpdateParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error)
return len(dAtA) - i, nil
}
func (m *MsgSpawn) Marshal() (dAtA []byte, err error) {
func (m *MsgInitialize) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@ -463,12 +463,12 @@ func (m *MsgSpawn) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
func (m *MsgSpawn) MarshalTo(dAtA []byte) (int, error) {
func (m *MsgInitialize) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *MsgSpawn) MarshalToSizedBuffer(dAtA []byte) (int, error) {
func (m *MsgInitialize) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
@ -493,7 +493,7 @@ func (m *MsgSpawn) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
func (m *MsgSpawnResponse) Marshal() (dAtA []byte, err error) {
func (m *MsgInitializeResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@ -503,12 +503,12 @@ func (m *MsgSpawnResponse) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
func (m *MsgSpawnResponse) MarshalTo(dAtA []byte) (int, error) {
func (m *MsgInitializeResponse) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *MsgSpawnResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
func (m *MsgInitializeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
@ -551,7 +551,7 @@ func (m *MsgUpdateParamsResponse) Size() (n int) {
return n
}
func (m *MsgSpawn) Size() (n int) {
func (m *MsgInitialize) Size() (n int) {
if m == nil {
return 0
}
@ -566,7 +566,7 @@ func (m *MsgSpawn) Size() (n int) {
return n
}
func (m *MsgSpawnResponse) Size() (n int) {
func (m *MsgInitializeResponse) Size() (n int) {
if m == nil {
return 0
}
@ -746,7 +746,7 @@ func (m *MsgUpdateParamsResponse) Unmarshal(dAtA []byte) error {
}
return nil
}
func (m *MsgSpawn) Unmarshal(dAtA []byte) error {
func (m *MsgInitialize) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@ -769,10 +769,10 @@ func (m *MsgSpawn) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: MsgSpawn: wiretype end group for non-group")
return fmt.Errorf("proto: MsgInitialize: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: MsgSpawn: illegal tag %d (wire type %d)", fieldNum, wire)
return fmt.Errorf("proto: MsgInitialize: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
@ -861,7 +861,7 @@ func (m *MsgSpawn) Unmarshal(dAtA []byte) error {
}
return nil
}
func (m *MsgSpawnResponse) Unmarshal(dAtA []byte) error {
func (m *MsgInitializeResponse) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@ -884,10 +884,10 @@ func (m *MsgSpawnResponse) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: MsgSpawnResponse: wiretype end group for non-group")
return fmt.Errorf("proto: MsgInitializeResponse: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: MsgSpawnResponse: illegal tag %d (wire type %d)", fieldNum, wire)
return fmt.Errorf("proto: MsgInitializeResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
default:

View File

@ -1,12 +1,13 @@
# `x/svc`
The svc module is responsible for managing the registration and authorization of services within the Sonr ecosystem. It provides a secure and verifiable mechanism for registering and authorizing services using Decentralized Identifiers (DIDs).
The svc module is responsible for managing the registration and authorization of services within the Sonr ecosystem. It provides a secure and verifiable mechanism for registering and authorizing services using Decentralized Identifiers (DIDs) and now incorporates UCAN (User Controlled Authorization Networks) for enhanced authorization capabilities.
## Concepts
- **Service**: A decentralized service on the Sonr Blockchain with properties such as ID, authority, origin, name, description, category, tags, and expiry height.
- **Profile**: Represents a DID alias with properties like ID, subject, origin, and controller.
- **Metadata**: Contains information about a service, including name, description, category, icon, and tags.
- **UCAN Authorization**: The module utilizes UCANs for a decentralized and user-centric authorization mechanism.
### Dependencies
@ -38,11 +39,11 @@ Stores DID alias information:
### MsgUpdateParams
Updates the module parameters. Can only be executed by the governance account.
Updates the module parameters, including UCAN-related parameters. Can only be executed by the governance account.
### MsgRegisterService
Registers a new service on the blockchain. Requires a valid TXT record in DNS for the origin.
Registers a new service on the blockchain. Requires a valid TXT record in DNS for the origin and may be subject to UCAN authorization checks.
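A minimal sketch of the DNS side of that check (illustrative only: the `sonr-service=` TXT format is a hypothetical placeholder, not taken from this diff):

```go
package main

import (
	"fmt"
	"net"
	"strings"
)

// hasServiceRecord reports whether the origin publishes a TXT record carrying
// the expected token. The "sonr-service=" prefix is hypothetical.
func hasServiceRecord(origin, token string) (bool, error) {
	records, err := net.LookupTXT(origin)
	if err != nil {
		return false, err
	}
	for _, r := range records {
		if strings.HasPrefix(r, "sonr-service=") &&
			strings.TrimPrefix(r, "sonr-service=") == token {
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ok, err := hasServiceRecord("example.com", "abc123")
	fmt.Println(ok, err)
}
```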
## Params
@ -50,6 +51,7 @@ The module has the following parameters:
- `categories`: List of allowed service categories
- `types`: List of allowed service types
- `UcanPermissions`: Specifies the required UCAN permissions for various actions within the module, such as registering a service.
## Query
@ -57,7 +59,7 @@ The module provides the following query:
### Params
Retrieves all parameters of the module.
Retrieves all parameters of the module, including UCAN-related parameters.
## Client
@ -65,7 +67,7 @@ Retrieves all parameters of the module.
The module provides a gRPC Query service with the following RPC:
- `Params`: Get all parameters of the module
- `Params`: Get all parameters of the module, including UCAN-related parameters.
### CLI
@ -73,7 +75,28 @@ The module provides a gRPC Query svc with the following RPC:
## Events
(TODO: List and describe event tags used by the module)
(TODO: List and describe event tags used by the module, including those related to UCAN authorization)
## UCAN Authorization
This module utilizes UCAN (User Controlled Authorization Networks) to provide a decentralized and user-centric authorization mechanism. UCANs are self-contained authorization tokens that allow users to delegate specific capabilities to other entities without relying on a central authority. A minimal sketch of the handler-side check appears after the integration notes below.
### UCAN Integration
- The module parameters include a `UcanPermissions` field that defines the default UCAN permissions required for actions within the module.
- Message handlers in the `MsgServer` perform UCAN authorization checks by:
- Retrieving the UCAN permissions from the context (injected by a middleware).
- Retrieving the required UCAN permissions from the module parameters.
- Verifying that the provided UCAN permissions satisfy the required permissions.
- A dedicated middleware is responsible for:
- Parsing incoming requests for UCAN tokens.
- Verifying UCAN token signatures and validity.
- Extracting UCAN permissions.
- Injecting UCAN permissions into the context.
- UCAN verification logic involves:
- Checking UCAN token signatures against the issuer's public key (resolved via the `x/did` module).
- Validating token expiration and other constraints.
- Parsing token capabilities and extracting relevant permissions.
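A minimal sketch of that flow (illustrative only: the context key, `Permission` type, and helper names are assumptions, not this module's actual API):

```go
package main

import (
	"context"
	"fmt"
)

// ucanKey is a hypothetical context key under which the middleware would
// store the permissions extracted from a verified UCAN token.
type ucanKey struct{}

// Permission stands in for one entry of the module's UcanPermissions.
type Permission string

// WithPermissions models the middleware's final step: after verifying the
// token signature (via the issuer's DID) and its expiry, inject the
// extracted permissions into the request context.
func WithPermissions(ctx context.Context, perms []Permission) context.Context {
	return context.WithValue(ctx, ucanKey{}, perms)
}

// requirePermission models the MsgServer-side check: compare the injected
// permissions against the permission the module parameters require.
func requirePermission(ctx context.Context, required Permission) error {
	perms, _ := ctx.Value(ucanKey{}).([]Permission)
	for _, p := range perms {
		if p == required {
			return nil
		}
	}
	return fmt.Errorf("ucan: missing required permission %q", required)
}

func main() {
	ctx := WithPermissions(context.Background(), []Permission{"service/register"})
	fmt.Println(requirePermission(ctx, "service/register")) // <nil>
	fmt.Println(requirePermission(ctx, "service/delete"))   // error
}
```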
## Future Improvements

View File

@ -28,3 +28,17 @@ func (k Querier) Params(c context.Context, req *types.QueryParamsRequest) (*type
return &types.QueryParamsResponse{Params: &p}, nil
}
// OriginExists implements types.QueryServer.
func (k Querier) OriginExists(goCtx context.Context, req *types.QueryOriginExistsRequest) (*types.QueryOriginExistsResponse, error) {
// ctx := sdk.UnwrapSDKContext(goCtx)
panic("OriginExists is unimplemented")
return &types.QueryOriginExistsResponse{}, nil
}
// ResolveOrigin implements types.QueryServer.
func (k Querier) ResolveOrigin(goCtx context.Context, req *types.QueryResolveOriginRequest) (*types.QueryResolveOriginResponse, error) {
// ctx := sdk.UnwrapSDKContext(goCtx)
panic("ResolveOrigin is unimplemented")
return &types.QueryResolveOriginResponse{}, nil
}
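// Illustrative only (not part of this diff): once wired to state, the
// OriginExists handler could take roughly this shape, assuming a
// hypothetical k.hasOrigin helper that consults the module's service
// records:
//
//	func (k Querier) OriginExists(goCtx context.Context, req *types.QueryOriginExistsRequest) (*types.QueryOriginExistsResponse, error) {
//		if req == nil || req.Origin == "" {
//			return nil, status.Error(codes.InvalidArgument, "origin is required")
//		}
//		ctx := sdk.UnwrapSDKContext(goCtx)
//		exists, err := k.hasOrigin(ctx, req.Origin)
//		if err != nil {
//			return nil, err
//		}
//		return &types.QueryOriginExistsResponse{Exists: exists}, nil
//	}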

View File

@ -6,7 +6,6 @@ const DefaultIndex uint64 = 1
// DefaultGenesis returns the default genesis state
func DefaultGenesis() *GenesisState {
return &GenesisState{
Params: DefaultParams(),
}
}
@ -14,6 +13,66 @@ func DefaultGenesis() *GenesisState {
// Validate performs basic genesis state validation returning an error upon any
// failure.
func (gs GenesisState) Validate() error {
return gs.Params.Validate()
}
// Equal checks if two Attenuation are equal
func (a *Attenuation) Equal(that *Attenuation) bool {
if that == nil {
return false
}
if a.Resource != nil {
if that.Resource == nil {
return false
}
if !a.Resource.Equal(that.Resource) {
return false
}
}
if len(a.Capabilities) != len(that.Capabilities) {
return false
}
for i := range a.Capabilities {
if !a.Capabilities[i].Equal(that.Capabilities[i]) {
return false
}
}
return true
}
// Equal checks if two Capability are equal
func (c *Capability) Equal(that *Capability) bool {
if that == nil {
return false
}
if c.Name != that.Name {
return false
}
if c.Parent != that.Parent {
return false
}
// TODO: check description
if len(c.Resources) != len(that.Resources) {
return false
}
for i := range c.Resources {
if c.Resources[i] != that.Resources[i] {
return false
}
}
return true
}
// Equal checks if two Resource are equal
func (r *Resource) Equal(that *Resource) bool {
if that == nil {
return false
}
if r.Kind != that.Kind {
return false
}
if r.Template != that.Template {
return false
}
return true
}
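// Illustrative only (not part of this diff): the Equal helpers compare the
// nested UCAN types field by field. For example:
//
//	a := &Resource{Kind: "api", Template: "/svc/v1/{origin}"}
//	b := &Resource{Kind: "api", Template: "/svc/v1/{origin}"}
//	_ = a.Equal(b) // true: Kind and Template both match
//	b.Template = "/svc/v1/other"
//	_ = a.Equal(b) // false: Template differs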

File diff suppressed because it is too large

View File

@ -25,3 +25,5 @@ func (p Params) Validate() error {
// TODO:
return nil
}
// DefaultAttenuations returns the default Attenuation

View File

@ -111,31 +111,228 @@ func (m *QueryParamsResponse) GetParams() *Params {
return nil
}
// QueryOriginExistsRequest is the request type for the Query/OriginExists RPC method.
type QueryOriginExistsRequest struct {
// origin is the origin to query.
Origin string `protobuf:"bytes,1,opt,name=origin,proto3" json:"origin,omitempty"`
}
func (m *QueryOriginExistsRequest) Reset() { *m = QueryOriginExistsRequest{} }
func (m *QueryOriginExistsRequest) String() string { return proto.CompactTextString(m) }
func (*QueryOriginExistsRequest) ProtoMessage() {}
func (*QueryOriginExistsRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_81a1010cdbf4bc9c, []int{2}
}
func (m *QueryOriginExistsRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *QueryOriginExistsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_QueryOriginExistsRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *QueryOriginExistsRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_QueryOriginExistsRequest.Merge(m, src)
}
func (m *QueryOriginExistsRequest) XXX_Size() int {
return m.Size()
}
func (m *QueryOriginExistsRequest) XXX_DiscardUnknown() {
xxx_messageInfo_QueryOriginExistsRequest.DiscardUnknown(m)
}
var xxx_messageInfo_QueryOriginExistsRequest proto.InternalMessageInfo
func (m *QueryOriginExistsRequest) GetOrigin() string {
if m != nil {
return m.Origin
}
return ""
}
// QueryOriginExistsResponse is the response type for the Query/OriginExists RPC method.
type QueryOriginExistsResponse struct {
// exists is the boolean value representing whether the origin exists.
Exists bool `protobuf:"varint,1,opt,name=exists,proto3" json:"exists,omitempty"`
}
func (m *QueryOriginExistsResponse) Reset() { *m = QueryOriginExistsResponse{} }
func (m *QueryOriginExistsResponse) String() string { return proto.CompactTextString(m) }
func (*QueryOriginExistsResponse) ProtoMessage() {}
func (*QueryOriginExistsResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_81a1010cdbf4bc9c, []int{3}
}
func (m *QueryOriginExistsResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *QueryOriginExistsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_QueryOriginExistsResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *QueryOriginExistsResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_QueryOriginExistsResponse.Merge(m, src)
}
func (m *QueryOriginExistsResponse) XXX_Size() int {
return m.Size()
}
func (m *QueryOriginExistsResponse) XXX_DiscardUnknown() {
xxx_messageInfo_QueryOriginExistsResponse.DiscardUnknown(m)
}
var xxx_messageInfo_QueryOriginExistsResponse proto.InternalMessageInfo
func (m *QueryOriginExistsResponse) GetExists() bool {
if m != nil {
return m.Exists
}
return false
}
// QueryResolveOriginRequest is the request type for the Query/ResolveOrigin RPC method.
type QueryResolveOriginRequest struct {
// origin is the origin to query.
Origin string `protobuf:"bytes,1,opt,name=origin,proto3" json:"origin,omitempty"`
}
func (m *QueryResolveOriginRequest) Reset() { *m = QueryResolveOriginRequest{} }
func (m *QueryResolveOriginRequest) String() string { return proto.CompactTextString(m) }
func (*QueryResolveOriginRequest) ProtoMessage() {}
func (*QueryResolveOriginRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_81a1010cdbf4bc9c, []int{4}
}
func (m *QueryResolveOriginRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *QueryResolveOriginRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_QueryResolveOriginRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *QueryResolveOriginRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_QueryResolveOriginRequest.Merge(m, src)
}
func (m *QueryResolveOriginRequest) XXX_Size() int {
return m.Size()
}
func (m *QueryResolveOriginRequest) XXX_DiscardUnknown() {
xxx_messageInfo_QueryResolveOriginRequest.DiscardUnknown(m)
}
var xxx_messageInfo_QueryResolveOriginRequest proto.InternalMessageInfo
func (m *QueryResolveOriginRequest) GetOrigin() string {
if m != nil {
return m.Origin
}
return ""
}
// QueryResolveOriginResponse is the response type for the Query/ResolveOrigin RPC method.
type QueryResolveOriginResponse struct {
// record is the record of the origin.
Record *Service `protobuf:"bytes,1,opt,name=record,proto3" json:"record,omitempty"`
}
func (m *QueryResolveOriginResponse) Reset() { *m = QueryResolveOriginResponse{} }
func (m *QueryResolveOriginResponse) String() string { return proto.CompactTextString(m) }
func (*QueryResolveOriginResponse) ProtoMessage() {}
func (*QueryResolveOriginResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_81a1010cdbf4bc9c, []int{5}
}
func (m *QueryResolveOriginResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *QueryResolveOriginResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_QueryResolveOriginResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *QueryResolveOriginResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_QueryResolveOriginResponse.Merge(m, src)
}
func (m *QueryResolveOriginResponse) XXX_Size() int {
return m.Size()
}
func (m *QueryResolveOriginResponse) XXX_DiscardUnknown() {
xxx_messageInfo_QueryResolveOriginResponse.DiscardUnknown(m)
}
var xxx_messageInfo_QueryResolveOriginResponse proto.InternalMessageInfo
func (m *QueryResolveOriginResponse) GetRecord() *Service {
if m != nil {
return m.Record
}
return nil
}
func init() {
proto.RegisterType((*QueryParamsRequest)(nil), "svc.v1.QueryParamsRequest")
proto.RegisterType((*QueryParamsResponse)(nil), "svc.v1.QueryParamsResponse")
proto.RegisterType((*QueryOriginExistsRequest)(nil), "svc.v1.QueryOriginExistsRequest")
proto.RegisterType((*QueryOriginExistsResponse)(nil), "svc.v1.QueryOriginExistsResponse")
proto.RegisterType((*QueryResolveOriginRequest)(nil), "svc.v1.QueryResolveOriginRequest")
proto.RegisterType((*QueryResolveOriginResponse)(nil), "svc.v1.QueryResolveOriginResponse")
}
func init() { proto.RegisterFile("svc/v1/query.proto", fileDescriptor_81a1010cdbf4bc9c) }
var fileDescriptor_81a1010cdbf4bc9c = []byte{
// 248 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x2a, 0x2e, 0x4b, 0xd6,
0x2f, 0x33, 0xd4, 0x2f, 0x2c, 0x4d, 0x2d, 0xaa, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62,
0x2b, 0x2e, 0x4b, 0xd6, 0x2b, 0x33, 0x94, 0x92, 0x49, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x4f,
0x2c, 0xc8, 0xd4, 0x4f, 0xcc, 0xcb, 0xcb, 0x2f, 0x49, 0x2c, 0xc9, 0xcc, 0xcf, 0x2b, 0x86, 0xa8,
0x92, 0x12, 0x81, 0xea, 0x4c, 0x4f, 0xcd, 0x4b, 0x2d, 0xce, 0x84, 0x8a, 0x2a, 0x89, 0x70, 0x09,
0x05, 0x82, 0x8c, 0x0a, 0x48, 0x2c, 0x4a, 0xcc, 0x2d, 0x0e, 0x4a, 0x2d, 0x2c, 0x4d, 0x2d, 0x2e,
0x51, 0xb2, 0xe5, 0x12, 0x46, 0x11, 0x2d, 0x2e, 0xc8, 0xcf, 0x2b, 0x4e, 0x15, 0x52, 0xe3, 0x62,
0x2b, 0x00, 0x8b, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x1b, 0xf1, 0xe9, 0x41, 0x6c, 0xd6, 0x83,
0xaa, 0x83, 0xca, 0x1a, 0x25, 0x71, 0xb1, 0x82, 0xb5, 0x0b, 0x45, 0x72, 0xb1, 0x41, 0xa4, 0x84,
0xa4, 0x60, 0x4a, 0x31, 0x6d, 0x93, 0x92, 0xc6, 0x2a, 0x07, 0xb1, 0x53, 0x49, 0xac, 0xe9, 0xf2,
0x93, 0xc9, 0x4c, 0x02, 0x42, 0x7c, 0xfa, 0x50, 0xf7, 0x43, 0xec, 0x70, 0xb2, 0x39, 0xf1, 0x48,
0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x27, 0x3c, 0x96, 0x63, 0xb8, 0xf0,
0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x28, 0xa5, 0xf4, 0xcc, 0x92, 0x8c, 0xd2, 0x24, 0xbd,
0xe4, 0xfc, 0x5c, 0xfd, 0xfc, 0xbc, 0xe2, 0xfc, 0xbc, 0x22, 0x7d, 0x30, 0x51, 0x01, 0x36, 0xa1,
0xa4, 0xb2, 0x20, 0xb5, 0x38, 0x89, 0x0d, 0xec, 0x7b, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff,
0x32, 0x0f, 0x6c, 0xb9, 0x4f, 0x01, 0x00, 0x00,
// 398 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0x41, 0x4b, 0xe3, 0x40,
0x1c, 0xc5, 0x9b, 0xc2, 0x86, 0xdd, 0xd9, 0xdd, 0x2a, 0x63, 0x29, 0x75, 0x94, 0xd8, 0xe6, 0x60,
0x3d, 0x65, 0x68, 0x7b, 0xd5, 0x8b, 0xd0, 0xb3, 0x1a, 0x4f, 0x7a, 0x4b, 0xe3, 0x10, 0x07, 0xda,
0x99, 0x74, 0x26, 0x09, 0x2d, 0x22, 0x82, 0x9f, 0x40, 0xf0, 0x4b, 0x79, 0x2c, 0x78, 0x11, 0x4f,
0xd2, 0xfa, 0x41, 0xc4, 0x99, 0x89, 0x18, 0x8c, 0xf5, 0x12, 0x32, 0xff, 0xff, 0x7b, 0xef, 0xc7,
0xbc, 0x04, 0x40, 0x99, 0x85, 0x38, 0xeb, 0xe2, 0x49, 0x4a, 0xc4, 0xcc, 0x8b, 0x05, 0x4f, 0x38,
0xb4, 0x65, 0x16, 0x7a, 0x59, 0x17, 0x6d, 0x47, 0x9c, 0x47, 0x23, 0x82, 0x83, 0x98, 0xe2, 0x80,
0x31, 0x9e, 0x04, 0x09, 0xe5, 0x4c, 0x6a, 0x15, 0xaa, 0x1b, 0x67, 0x44, 0x18, 0x91, 0xd4, 0x4c,
0xdd, 0x3a, 0x80, 0x27, 0xef, 0x51, 0xc7, 0x81, 0x08, 0xc6, 0xd2, 0x27, 0x93, 0x94, 0xc8, 0xc4,
0x3d, 0x00, 0x1b, 0x85, 0xa9, 0x8c, 0x39, 0x93, 0x04, 0xee, 0x02, 0x3b, 0x56, 0x93, 0xa6, 0xd5,
0xb2, 0xf6, 0xfe, 0xf6, 0x6a, 0x9e, 0x26, 0x7b, 0x46, 0x67, 0xb6, 0x6e, 0x0f, 0x34, 0x95, 0xfd,
0x48, 0xd0, 0x88, 0xb2, 0xc1, 0x94, 0xca, 0x24, 0x8f, 0x86, 0x0d, 0x60, 0x73, 0x35, 0x56, 0x19,
0x7f, 0x7c, 0x73, 0x72, 0xfb, 0x60, 0xb3, 0xc4, 0x63, 0xc0, 0x0d, 0x60, 0x13, 0x35, 0x51, 0xa6,
0xdf, 0xbe, 0x39, 0x7d, 0x98, 0x7c, 0x22, 0xf9, 0x28, 0x23, 0xda, 0xfb, 0x13, 0x69, 0x00, 0x50,
0x99, 0xc9, 0xa0, 0x3a, 0xc0, 0x16, 0x24, 0xe4, 0xe2, 0xc2, 0xdc, 0x71, 0x2d, 0xbf, 0xe3, 0x29,
0x11, 0x19, 0x0d, 0x89, 0x6f, 0xd6, 0xbd, 0xe7, 0x2a, 0xf8, 0xa5, 0x72, 0xe0, 0x19, 0xb0, 0x75,
0x01, 0x10, 0xe5, 0xe2, 0xaf, 0x9d, 0xa2, 0xad, 0xd2, 0x9d, 0xa6, 0xba, 0x8d, 0xdb, 0xc7, 0xd7,
0xfb, 0xea, 0x3a, 0xac, 0x61, 0xf3, 0x95, 0x74, 0x93, 0x30, 0x05, 0xff, 0x3e, 0x17, 0x02, 0x5b,
0x85, 0x90, 0x92, 0x7e, 0x51, 0x7b, 0x85, 0xc2, 0xc0, 0x5a, 0x0a, 0x86, 0x60, 0x33, 0x87, 0xe9,
0x62, 0x24, 0xbe, 0xd2, 0x2f, 0xd7, 0xf0, 0x06, 0xfc, 0x2f, 0xb4, 0x03, 0x8b, 0xa9, 0x65, 0x75,
0x23, 0x77, 0x95, 0xc4, 0x90, 0x3b, 0x8a, 0xdc, 0x86, 0x3b, 0xdf, 0x91, 0xb1, 0x2e, 0xf7, 0x70,
0xff, 0x61, 0xe1, 0x58, 0xf3, 0x85, 0x63, 0xbd, 0x2c, 0x1c, 0xeb, 0x6e, 0xe9, 0x54, 0xe6, 0x4b,
0xa7, 0xf2, 0xb4, 0x74, 0x2a, 0xe7, 0x6e, 0x44, 0x93, 0xcb, 0x74, 0xe8, 0x85, 0x7c, 0x8c, 0x39,
0x93, 0x9c, 0x09, 0xac, 0x1e, 0x53, 0x15, 0x99, 0xcc, 0x62, 0x22, 0x87, 0xb6, 0xfa, 0xb7, 0xfb,
0x6f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x0e, 0x55, 0xd8, 0x48, 0x2d, 0x03, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
@ -152,6 +349,10 @@ const _ = grpc.SupportPackageIsVersion4
type QueryClient interface {
// Params queries all parameters of the module.
Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error)
// OriginExists queries if a given origin exists.
OriginExists(ctx context.Context, in *QueryOriginExistsRequest, opts ...grpc.CallOption) (*QueryOriginExistsResponse, error)
// ResolveOrigin queries the domain of a given service and returns its record with capabilities.
ResolveOrigin(ctx context.Context, in *QueryResolveOriginRequest, opts ...grpc.CallOption) (*QueryResolveOriginResponse, error)
}
type queryClient struct {
@ -171,10 +372,32 @@ func (c *queryClient) Params(ctx context.Context, in *QueryParamsRequest, opts .
return out, nil
}
func (c *queryClient) OriginExists(ctx context.Context, in *QueryOriginExistsRequest, opts ...grpc.CallOption) (*QueryOriginExistsResponse, error) {
out := new(QueryOriginExistsResponse)
err := c.cc.Invoke(ctx, "/svc.v1.Query/OriginExists", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *queryClient) ResolveOrigin(ctx context.Context, in *QueryResolveOriginRequest, opts ...grpc.CallOption) (*QueryResolveOriginResponse, error) {
out := new(QueryResolveOriginResponse)
err := c.cc.Invoke(ctx, "/svc.v1.Query/ResolveOrigin", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// QueryServer is the server API for Query service.
type QueryServer interface {
// Params queries all parameters of the module.
Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error)
// OriginExists queries if a given origin exists.
OriginExists(context.Context, *QueryOriginExistsRequest) (*QueryOriginExistsResponse, error)
// ResolveOrigin queries the domain of a given service and returns its record with capabilities.
ResolveOrigin(context.Context, *QueryResolveOriginRequest) (*QueryResolveOriginResponse, error)
}
// UnimplementedQueryServer can be embedded to have forward compatible implementations.
@ -184,6 +407,12 @@ type UnimplementedQueryServer struct {
func (*UnimplementedQueryServer) Params(ctx context.Context, req *QueryParamsRequest) (*QueryParamsResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Params not implemented")
}
func (*UnimplementedQueryServer) OriginExists(ctx context.Context, req *QueryOriginExistsRequest) (*QueryOriginExistsResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method OriginExists not implemented")
}
func (*UnimplementedQueryServer) ResolveOrigin(ctx context.Context, req *QueryResolveOriginRequest) (*QueryResolveOriginResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ResolveOrigin not implemented")
}
func RegisterQueryServer(s grpc1.Server, srv QueryServer) {
s.RegisterService(&_Query_serviceDesc, srv)
@ -207,6 +436,42 @@ func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interf
return interceptor(ctx, in, info, handler)
}
func _Query_OriginExists_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(QueryOriginExistsRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(QueryServer).OriginExists(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/svc.v1.Query/OriginExists",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(QueryServer).OriginExists(ctx, req.(*QueryOriginExistsRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Query_ResolveOrigin_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(QueryResolveOriginRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(QueryServer).ResolveOrigin(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/svc.v1.Query/ResolveOrigin",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(QueryServer).ResolveOrigin(ctx, req.(*QueryResolveOriginRequest))
}
return interceptor(ctx, in, info, handler)
}
var Query_serviceDesc = _Query_serviceDesc
var _Query_serviceDesc = grpc.ServiceDesc{
ServiceName: "svc.v1.Query",
@ -216,6 +481,14 @@ var _Query_serviceDesc = grpc.ServiceDesc{
MethodName: "Params",
Handler: _Query_Params_Handler,
},
{
MethodName: "OriginExists",
Handler: _Query_OriginExists_Handler,
},
{
MethodName: "ResolveOrigin",
Handler: _Query_ResolveOrigin_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "svc/v1/query.proto",
@ -279,6 +552,134 @@ func (m *QueryParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
func (m *QueryOriginExistsRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *QueryOriginExistsRequest) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *QueryOriginExistsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Origin) > 0 {
i -= len(m.Origin)
copy(dAtA[i:], m.Origin)
i = encodeVarintQuery(dAtA, i, uint64(len(m.Origin)))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
func (m *QueryOriginExistsResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *QueryOriginExistsResponse) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *QueryOriginExistsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.Exists {
i--
if m.Exists {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i--
dAtA[i] = 0x8
}
return len(dAtA) - i, nil
}
func (m *QueryResolveOriginRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *QueryResolveOriginRequest) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *QueryResolveOriginRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Origin) > 0 {
i -= len(m.Origin)
copy(dAtA[i:], m.Origin)
i = encodeVarintQuery(dAtA, i, uint64(len(m.Origin)))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
func (m *QueryResolveOriginResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *QueryResolveOriginResponse) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *QueryResolveOriginResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.Record != nil {
{
size, err := m.Record.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintQuery(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
func encodeVarintQuery(dAtA []byte, offset int, v uint64) int {
offset -= sovQuery(v)
base := offset
@ -312,6 +713,57 @@ func (m *QueryParamsResponse) Size() (n int) {
return n
}
func (m *QueryOriginExistsRequest) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Origin)
if l > 0 {
n += 1 + l + sovQuery(uint64(l))
}
return n
}
func (m *QueryOriginExistsResponse) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.Exists {
n += 2
}
return n
}
func (m *QueryResolveOriginRequest) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Origin)
if l > 0 {
n += 1 + l + sovQuery(uint64(l))
}
return n
}
func (m *QueryResolveOriginResponse) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.Record != nil {
l = m.Record.Size()
n += 1 + l + sovQuery(uint64(l))
}
return n
}
func sovQuery(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
@ -454,6 +906,326 @@ func (m *QueryParamsResponse) Unmarshal(dAtA []byte) error {
}
return nil
}
func (m *QueryOriginExistsRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowQuery
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: QueryOriginExistsRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: QueryOriginExistsRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Origin", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowQuery
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthQuery
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthQuery
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Origin = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipQuery(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthQuery
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *QueryOriginExistsResponse) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowQuery
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: QueryOriginExistsResponse: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: QueryOriginExistsResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Exists", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowQuery
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.Exists = bool(v != 0)
default:
iNdEx = preIndex
skippy, err := skipQuery(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthQuery
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *QueryResolveOriginRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowQuery
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: QueryResolveOriginRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: QueryResolveOriginRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Origin", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowQuery
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthQuery
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthQuery
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Origin = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipQuery(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthQuery
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *QueryResolveOriginResponse) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowQuery
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: QueryResolveOriginResponse: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: QueryResolveOriginResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Record", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowQuery
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthQuery
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthQuery
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Record == nil {
m.Record = &Service{}
}
if err := m.Record.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipQuery(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthQuery
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipQuery(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0

View File

@ -51,6 +51,114 @@ func local_request_Query_Params_0(ctx context.Context, marshaler runtime.Marshal
}
func request_Query_OriginExists_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq QueryOriginExistsRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["origin"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "origin")
}
protoReq.Origin, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "origin", err)
}
msg, err := client.OriginExists(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_Query_OriginExists_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq QueryOriginExistsRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["origin"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "origin")
}
protoReq.Origin, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "origin", err)
}
msg, err := server.OriginExists(ctx, &protoReq)
return msg, metadata, err
}
func request_Query_ResolveOrigin_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq QueryResolveOriginRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["origin"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "origin")
}
protoReq.Origin, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "origin", err)
}
msg, err := client.ResolveOrigin(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_Query_ResolveOrigin_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq QueryResolveOriginRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["origin"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "origin")
}
protoReq.Origin, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "origin", err)
}
msg, err := server.ResolveOrigin(ctx, &protoReq)
return msg, metadata, err
}
// RegisterQueryHandlerServer registers the http handlers for service Query to "mux".
// UnaryRPC :call QueryServer directly.
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
@ -80,6 +188,52 @@ func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv
})
mux.Handle("GET", pattern_Query_OriginExists_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_Query_OriginExists_0(rctx, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_Query_OriginExists_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("GET", pattern_Query_ResolveOrigin_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_Query_ResolveOrigin_0(rctx, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_Query_ResolveOrigin_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
@ -141,13 +295,61 @@ func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie
})
mux.Handle("GET", pattern_Query_OriginExists_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_Query_OriginExists_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_Query_OriginExists_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("GET", pattern_Query_ResolveOrigin_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_Query_ResolveOrigin_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_Query_ResolveOrigin_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
var (
pattern_Query_Params_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"svc", "v1", "params"}, "", runtime.AssumeColonVerbOpt(false)))
pattern_Query_OriginExists_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"svc", "v1", "origins", "origin"}, "", runtime.AssumeColonVerbOpt(false)))
pattern_Query_ResolveOrigin_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"svc", "v1", "origins", "origin", "record"}, "", runtime.AssumeColonVerbOpt(false)))
)
var (
forward_Query_Params_0 = runtime.ForwardResponseMessage
forward_Query_OriginExists_0 = runtime.ForwardResponseMessage
forward_Query_ResolveOrigin_0 = runtime.ForwardResponseMessage
)
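// The three patterns above map the Query service onto REST routes:
//
//	GET /svc/v1/params                  -> Query/Params
//	GET /svc/v1/origins/{origin}        -> Query/OriginExists
//	GET /svc/v1/origins/{origin}/record -> Query/ResolveOrigin
//
// Illustrative only: assuming a node exposes the gRPC-Gateway on the Cosmos
// SDK default REST port (1317), the existence check could be exercised with:
//
//	curl http://localhost:1317/svc/v1/origins/example.com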