Merge branch 'develop'

This commit is contained in:
Prad Nukala 2024-12-05 20:38:27 -05:00
commit 81ea0256c5
755 changed files with 104112 additions and 39325 deletions

View File

@ -5,11 +5,13 @@ WORKDIR /code
USER root:root
RUN mkdir -p /code && chown ${DEVBOX_USER}:${DEVBOX_USER} /code
USER ${DEVBOX_USER}:${DEVBOX_USER}
# Copy devbox configuration files
COPY --chown=${DEVBOX_USER}:${DEVBOX_USER} devbox.json devbox.json
COPY --chown=${DEVBOX_USER}:${DEVBOX_USER} devbox.lock devbox.lock
RUN devbox run -- echo "Installed Packages."
RUN devbox shellenv --init-hook >> ~/.profile

View File

@ -4,11 +4,6 @@
"dockerfile": "./Dockerfile",
"context": ".."
},
"features": {
"ghcr.io/devcontainers/features/sshd:1": {
"version": "latest"
}
},
"customizations": {
"vscode": {
"settings": {},
@ -17,5 +12,78 @@
]
}
},
"remoteUser": "devbox"
}
"remoteUser": "devbox",
"forwardPorts": [
1317,
26657,
9090,
3000,
80,
5000
],
"portsAttributes": {
"1317": {
"label": "sonr-api",
"onAutoForward": "notify"
},
"26657": {
"label": "sonr-rpc",
"onAutoForward": "notify"
},
"9090": {
"label": "sonr-grpc",
"onAutoForward": "silent"
},
"3000": {
"label": "hway-frontend",
"onAutoForward": "silent"
},
"80": {
"label": "ipfs-gateway",
"onAutoForward": "silent"
},
"5000": {
"label": "ipfs-api",
"onAutoForward": "silent"
}
},
"features": {
"ghcr.io/michidk/devcontainers-features/bun:1": {
"version": "latest"
},
"ghcr.io/cirolosapio/devcontainers-features/alpine-ohmyzsh:0": {
"version": "latest"
},
"ghcr.io/guiyomh/features/golangci-lint:0": {
"version": "latest"
},
"ghcr.io/meaningful-ooo/devcontainer-features/homebrew:2": {
"version": "latest"
},
"ghcr.io/devcontainers/features/docker-in-docker:2": {
"version": "latest"
},
"ghcr.io/devcontainers/features/github-cli:1": {
"version": "latest"
},
"ghcr.io/devcontainers/features/go:1": {
"version": "latest",
"go": "1.23"
},
"ghcr.io/devcontainers/features/rust:1": {
"version": "latest",
"rust": "1.73"
},
"ghcr.io/jpawlowski/devcontainer-features/codespaces-dotfiles:1": {
"version": "latest"
},
"ghcr.io/duduribeiro/devcontainer-features/neovim:1": {
"version": "latest"
},
"ghcr.io/dlouwers/devcontainer-features/devbox:1": {
"version": "latest"
},
"ghcr.io/devcontainers/features/sshd:1": {
"version": "latest"
}
}

569
.github/aider/guides/cosmos-proto.md vendored Normal file
View File

@ -0,0 +1,569 @@
# Protocol Buffers in Cosmos SDK
## Overview
The Cosmos SDK uses Protocol Buffers for serialization and API definitions. Generation is handled via a Docker image: `ghcr.io/cosmos/proto-builder:0.15.x`.
## Generation Tools
- **Buf**: Primary tool for protobuf management
- **protocgen.sh**: Core generation script in `scripts/`
- **Makefile Commands**: Standard commands for generate, lint, format
## Key Components
### Buf Configuration
1. **Workspace Setup**
- Root level buf workspace configuration
- Manages multiple protobuf directories
2. **Directory Structure**
```
proto/
├── buf.gen.gogo.yaml # GoGo Protobuf generation
├── buf.gen.pulsar.yaml # Pulsar API generation
├── buf.gen.swagger.yaml # OpenAPI/Swagger docs
├── buf.lock # Dependencies
├── buf.yaml # Core configuration
├── cosmos/ # Core protos
└── tendermint/ # Consensus protos
```
3. **Module Protos**
- Located in `x/{moduleName}/proto`
- Module-specific message definitions
#### `buf.gen.gogo.yaml`
`buf.gen.gogo.yaml` defines how the protobuf files should be generated for use within the module. This file uses [gogoproto](https://github.com/gogo/protobuf), a separate generator from the official Go protobuf generator that makes working with various objects more ergonomic and has more performant encode and decode steps.
```go reference
https://github.com/cosmos/cosmos-sdk/blob/main/proto/buf.gen.gogo.yaml#L1-L9
```
#### `buf.gen.pulsar.yaml`
`buf.gen.pulsar.yaml` defines how protobuf files should be generated using the [new golang apiv2 of protobuf](https://go.dev/blog/protobuf-apiv2). This generator is used instead of the google go-proto generator because it has some extra helpers for Cosmos SDK applications and will have more performant encode and decode than the google go-proto generator. You can follow the development of this generator [here](https://github.com/cosmos/cosmos-proto).
```go reference
https://github.com/cosmos/cosmos-sdk/blob/main/proto/buf.gen.pulsar.yaml#L1-L18
```
#### `buf.gen.swagger.yaml`
`buf.gen.swagger.yaml` generates the swagger documentation for the queries and messages of the chain. It will only define the REST API endpoints that were defined in the query and msg servers. You can find an example of this [here](https://github.com/cosmos/cosmos-sdk/blob/main/x/bank/proto/cosmos/bank/v1beta1/query.proto).
```go reference
https://github.com/cosmos/cosmos-sdk/blob/main/proto/buf.gen.swagger.yaml#L1-L6
```
#### `buf.lock`
This is an autogenerated file based on the dependencies required by the `.gen` files. There is no need to copy the current one. If you depend on Cosmos SDK proto definitions, a new entry for the Cosmos SDK will need to be provided. The dependency you will need is `buf.build/cosmos/cosmos-sdk`.
```go reference
https://github.com/cosmos/cosmos-sdk/blob/main/proto/buf.lock#L1-L16
```
#### `buf.yaml`
`buf.yaml` defines the [name of your package](https://github.com/cosmos/cosmos-sdk/blob/main/proto/buf.yaml#L3), which [breakage checker](https://buf.build/docs/tutorials/getting-started-with-buf-cli#detect-breaking-changes) to use and how to [lint your protobuf files](https://buf.build/docs/tutorials/getting-started-with-buf-cli#lint-your-api).
It is advised to use a tagged version of the buf modules corresponding to the version of the Cosmos SDK being used.
```go reference
https://github.com/cosmos/cosmos-sdk/blob/main/proto/buf.yaml#L1-L24
```
We use a variety of linters for the Cosmos SDK protobuf files. The repo also checks this in CI.
A reference to the GitHub Actions workflow can be found [here](https://github.com/cosmos/cosmos-sdk/blob/main/.github/workflows/proto.yml#L1-L32)
# ORM
The Cosmos SDK ORM is a state management library that provides a rich, but opinionated set of tools for managing a
module's state. It provides support for:
- type safe management of state
- multipart keys
- secondary indexes
- unique indexes
- easy prefix and range queries
- automatic genesis import/export
- automatic query services for clients, including support for light client proofs (still in development)
- indexing state data in external databases (still in development)
## Design and Philosophy
The ORM's data model is inspired by the relational data model found in SQL databases. The core abstraction is a table
with a primary key and optional secondary indexes.
Because the Cosmos SDK uses protobuf as its encoding layer, ORM tables are defined directly in .proto files using
protobuf options. Each table is defined by a single protobuf `message` type and a schema of multiple tables is
represented by a single .proto file.
Table structure is specified in the same file where messages are defined in order to make it easy to focus on better
design of the state layer. Because blockchain state layout is part of the public API for clients (TODO: link to docs on
light client proofs), it is important to think about the state layout as being part of the public API of a module.
Changing the state layout actually breaks clients, so it is ideal to think through it carefully up front and to aim for
a design that will eliminate or minimize breaking changes down the road. Also, good design of state enables building
more performant and sophisticated applications. Providing users with a set of tools inspired by relational databases
which have a long history of database design best practices and allowing schema to be specified declaratively in a
single place are design choices the ORM makes to enable better design and more durable APIs.
Also, by only supporting the table abstraction as opposed to key-value pair maps, it is easy to add new
columns/fields to any data structure without causing a breaking change, and the data structures can easily be indexed in
any off-the-shelf SQL database for more sophisticated queries.
The encoding of fields in keys is designed to support ordered iteration for all protobuf primitive field types
except for `bytes` as well as the well-known types `google.protobuf.Timestamp` and `google.protobuf.Duration`. Encodings
are optimized for storage space when it makes sense (see the documentation in `cosmos/orm/v1/orm.proto` for more details)
and table rows do not use extra storage space to store key fields in the value.
We recommend that users of the ORM attempt to follow database design best practices such as
[normalization](https://en.wikipedia.org/wiki/Database_normalization) (at least 1NF).
For instance, defining `repeated` fields in a table is considered an anti-pattern because it breaks first normal form (1NF).
Although we support `repeated` fields in tables, they cannot be used as key fields for this reason. This may seem
restrictive, but years of best practice (and also experience in the SDK) have shown that following this pattern
leads to easier-to-maintain schemas.
To illustrate the motivation for these principles with an example from the SDK, historically balances were stored
as a mapping from account -> map of denom to amount. This did not scale well because an account with 100 token balances
needed to be encoded/decoded every time a single coin balance changed. Now balances are stored as account,denom -> amount
as in the `Balance` example below. With the ORM's data model, if we wanted to add a new field to `Balance` such as
`unlocked_balance` (if vesting accounts were redesigned in this way), it would be easy to add it to this table without
requiring a data migration. Because of the ORM's optimizations, the account and denom are only stored in the key part
of storage and not in the value leading to both a flexible data model and efficient usage of storage.
## Defining Tables
To define a table:
1. create a .proto file to describe the module's state (naming it `state.proto` is recommended for consistency),
and import "cosmos/orm/v1/orm.proto", ex:
```protobuf
syntax = "proto3";
package bank_example;
import "cosmos/orm/v1/orm.proto";
```
2. define a `message` for the table, ex:
```protobuf
message Balance {
  bytes account = 1;
  string denom = 2;
  uint64 balance = 3;
}
```
3. add the `cosmos.orm.v1.table` option to the table and give the table an `id` unique within this .proto file:
```protobuf
message Balance {
  option (cosmos.orm.v1.table) = {
    id: 1
  };
  bytes account = 1;
  string denom = 2;
  uint64 balance = 3;
}
```
4. define the primary key field or fields, as a comma-separated list of the fields from the message which should make
up the primary key:
```protobuf
message Balance {
  option (cosmos.orm.v1.table) = {
    id: 1
    primary_key: { fields: "account,denom" }
  };
  bytes account = 1;
  string denom = 2;
  uint64 balance = 3;
}
```
5. add any desired secondary indexes by specifying an `id` unique within the table and a comma-separated list of the
index fields:
```protobuf
message Balance {
  option (cosmos.orm.v1.table) = {
    id: 1;
    primary_key: { fields: "account,denom" }
    index: { id: 1 fields: "denom" } // this allows querying for the accounts which own a denom
  };
  bytes account = 1;
  string denom = 2;
  uint64 amount = 3;
}
```
### Auto-incrementing Primary Keys
A common pattern in SDK modules and in database design is to define tables with a single integer `id` field with an
automatically generated primary key. In the ORM we can do this by setting the `auto_increment` option to `true` on the
primary key, ex:
```protobuf
message Account {
  option (cosmos.orm.v1.table) = {
    id: 2;
    primary_key: { fields: "id", auto_increment: true }
  };
  uint64 id = 1;
  bytes address = 2;
}
```
### Unique Indexes
A unique index can be added by setting the `unique` option to `true` on an index, ex:
```protobuf
message Account {
  option (cosmos.orm.v1.table) = {
    id: 2;
    primary_key: { fields: "id", auto_increment: true }
    index: {id: 1, fields: "address", unique: true}
  };
  uint64 id = 1;
  bytes address = 2;
}
```
### Singletons
The ORM also supports a special type of table with only one row called a `singleton`. This can be used for storing
module parameters. Singletons only need to define a unique `id`, which cannot conflict with the id of any other
table or singleton in the same .proto file. Ex:
```protobuf
message Params {
  option (cosmos.orm.v1.singleton) = {
    id: 3;
  };
  google.protobuf.Duration voting_period = 1;
  uint64 min_threshold = 2;
}
```
## Running Codegen
NOTE: the ORM will only work with protobuf code that implements the [google.golang.org/protobuf](https://pkg.go.dev/google.golang.org/protobuf)
API. That means it will not work with code generated using gogo-proto.
To install the ORM's code generator, run:
```shell
go install cosmossdk.io/orm/cmd/protoc-gen-go-cosmos-orm@latest
```
The recommended way to run the code generator is to use [buf build](https://docs.buf.build/build/usage).
This is an example `buf.gen.yaml` that runs `protoc-gen-go`, `protoc-gen-go-grpc` and `protoc-gen-go-cosmos-orm`
using buf managed mode:
```yaml
version: v1
managed:
  enabled: true
  go_package_prefix:
    default: foo.bar/api # the go package prefix of your package
    override:
      buf.build/cosmos/cosmos-sdk: cosmossdk.io/api # required to import the Cosmos SDK api module
plugins:
  - name: go
    out: .
    opt: paths=source_relative
  - name: go-grpc
    out: .
    opt: paths=source_relative
  - name: go-cosmos-orm
    out: .
    opt: paths=source_relative
```
## Using the ORM in a module
### Initialization
To use the ORM in a module, first create a `ModuleSchemaDescriptor`. This tells the ORM which .proto files have defined
an ORM schema and assigns them all a unique non-zero id. Ex:
```go
var MyModuleSchema = &ormv1alpha1.ModuleSchemaDescriptor{
    SchemaFile: []*ormv1alpha1.ModuleSchemaDescriptor_FileEntry{
        {
            Id:            1,
            ProtoFileName: mymodule.File_my_module_state_proto.Path(),
        },
    },
}
```
In the ORM generated code for a file named `state.proto`, there should be a generated interface `StateStore`
with a constructor `NewStateStore` that takes a parameter of type `ormdb.ModuleDB`. Add a reference to `StateStore`
to your module's keeper struct. Ex:
```go
type Keeper struct {
    db StateStore
}
```
Then instantiate the `StateStore` instance via an `ormdb.ModuleDB` that is instantiated from the `SchemaDescriptor`
above and one or more store services from `cosmossdk.io/core/store`. Ex:
```go
func NewKeeper(storeService store.KVStoreService) (*Keeper, error) {
    modDb, err := ormdb.NewModuleDB(MyModuleSchema, ormdb.ModuleDBOptions{KVStoreService: storeService})
    if err != nil {
        return nil, err
    }
    db, err := NewStateStore(modDb)
    if err != nil {
        return nil, err
    }
    return &Keeper{db: db}, nil
}
```
### Using the generated code
The generated code for the ORM contains methods for inserting, updating, deleting and querying table entries.
For each table in a .proto file, there is a type-safe table interface implemented in generated code. For instance,
for a table named `Balance` there should be a `BalanceTable` interface that looks like this:
```go
type BalanceTable interface {
    Insert(ctx context.Context, balance *Balance) error
    Update(ctx context.Context, balance *Balance) error
    Save(ctx context.Context, balance *Balance) error
    Delete(ctx context.Context, balance *Balance) error
    Has(ctx context.Context, account []byte, denom string) (found bool, err error)
    // Get returns nil and an error which responds true to ormerrors.IsNotFound() if the record was not found.
    Get(ctx context.Context, account []byte, denom string) (*Balance, error)
    List(ctx context.Context, prefixKey BalanceIndexKey, opts ...ormlist.Option) (BalanceIterator, error)
    ListRange(ctx context.Context, from, to BalanceIndexKey, opts ...ormlist.Option) (BalanceIterator, error)
    DeleteBy(ctx context.Context, prefixKey BalanceIndexKey) error
    DeleteRange(ctx context.Context, from, to BalanceIndexKey) error
    doNotImplement()
}
```
This `BalanceTable` should be accessible from the `StateStore` interface (assuming our file is named `state.proto`)
via a `BalanceTable()` accessor method. If all the above example tables/singletons were in the same `state.proto`,
then `StateStore` would get generated like this:
```go
type StateStore interface {
    BalanceTable() BalanceTable
    AccountTable() AccountTable
    ParamsTable() ParamsTable
    doNotImplement()
}
```
So to work with the `BalanceTable` in a keeper method we could use code like this:
```go
func (k keeper) AddBalance(ctx context.Context, acct []byte, denom string, amount uint64) error {
    balance, err := k.db.BalanceTable().Get(ctx, acct, denom)
    if err != nil && !ormerrors.IsNotFound(err) {
        return err
    }
    if balance == nil {
        balance = &Balance{
            Account: acct,
            Denom:   denom,
            Amount:  amount,
        }
    } else {
        balance.Amount = balance.Amount + amount
    }
    return k.db.BalanceTable().Save(ctx, balance)
}
```
`List` methods take `IndexKey` parameters. For instance, `BalanceTable.List` takes `BalanceIndexKey`. `BalanceIndexKey`
lets us represent index keys for the different indexes (primary and secondary) on the `Balance` table. The primary key
in the `Balance` table gets a struct `BalanceAccountDenomIndexKey` and the first index gets an index key `BalanceDenomIndexKey`.
If we wanted to list all the denoms and amounts that an account holds, we would use `BalanceAccountDenomIndexKey`
with a `List` query just on the account prefix. Ex:
```go
it, err := keeper.db.BalanceTable().List(ctx, BalanceAccountDenomIndexKey{}.WithAccount(acct))
```
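Iterators returned by `List` must be closed when done. Below is a minimal sketch of consuming one, assuming the ORM's generated iterator API (`Next`, `Value`, `Close`):
```go
// ListAccountBalances collects every denom balance held by acct.
func (k keeper) ListAccountBalances(ctx context.Context, acct []byte) ([]*Balance, error) {
    it, err := k.db.BalanceTable().List(ctx, BalanceAccountDenomIndexKey{}.WithAccount(acct))
    if err != nil {
        return nil, err
    }
    // iterators must be closed to release the underlying store iterator
    defer it.Close()

    var balances []*Balance
    for it.Next() {
        balance, err := it.Value()
        if err != nil {
            return nil, err
        }
        balances = append(balances, balance)
    }
    return balances, nil
}
```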
# ProtocolBuffer Annotations
This document explains the various protobuf annotations that have been added to make working with protobuf easier for Cosmos SDK application developers.
## Signer
Signer specifies which field should be used to determine the signer of a message for the Cosmos SDK. Clients can also use this field to infer the message signer.
Read more about the signer field [here](./02-messages-and-queries.md).
```protobuf reference
https://github.com/cosmos/cosmos-sdk/blob/e6848d99b55a65d014375b295bdd7f9641aac95e/proto/cosmos/bank/v1beta1/tx.proto#L40
```
```proto
option (cosmos.msg.v1.signer) = "from_address";
```
## Scalar
The scalar type defines a way for clients to understand how to construct protobuf messages according to what is expected by the module and the SDK.
```proto
(cosmos_proto.scalar) = "cosmos.AddressString"
```
Example of account address string scalar:
```proto reference
https://github.com/cosmos/cosmos-sdk/blob/e6848d99b55a65d014375b295bdd7f9641aac95e/proto/cosmos/bank/v1beta1/tx.proto#L46
```
Example of validator address string scalar:
```proto reference
https://github.com/cosmos/cosmos-sdk/blob/e8f28bf5db18b8d6b7e0d94b542ce4cf48fed9d6/proto/cosmos/distribution/v1beta1/query.proto#L87
```
Example of pubkey scalar:
```proto reference
https://github.com/cosmos/cosmos-sdk/blob/11068bfbcd44a7db8af63b6a8aa079b1718f6040/proto/cosmos/staking/v1beta1/tx.proto#L94
```
Example of Decimals scalar:
```proto reference
https://github.com/cosmos/cosmos-sdk/blob/e8f28bf5db18b8d6b7e0d94b542ce4cf48fed9d6/proto/cosmos/distribution/v1beta1/distribution.proto#L26
```
Example of Int scalar:
```proto reference
https://github.com/cosmos/cosmos-sdk/blob/e8f28bf5db18b8d6b7e0d94b542ce4cf48fed9d6/proto/cosmos/gov/v1/gov.proto#L137
```
There are a few options for what can be provided as a scalar: `cosmos.AddressString`, `cosmos.ValidatorAddressString`, `cosmos.ConsensusAddressString`, `cosmos.Int`, `cosmos.Dec`.
## Implements_Interface
`implements_interface` is used to provide information to client tooling like [telescope](https://github.com/cosmology-tech/telescope) on how to encode and decode protobuf messages.
```proto
option (cosmos_proto.implements_interface) = "cosmos.auth.v1beta1.AccountI";
```
## Method,Field,Message Added In
`method_added_in`, `field_added_in` and `message_added_in` are annotations that denote to clients that a method, field, or message was added in a later version. This is useful when new methods or fields are added in later versions, so that clients can be aware of what they can call.
The annotation should be worded as follows:
```proto
option (cosmos_proto.method_added_in) = "cosmos-sdk v0.50.1";
option (cosmos_proto.method_added_in) = "x/epochs v1.0.0";
option (cosmos_proto.method_added_in) = "simapp v24.0.0";
```
## Amino
The amino codec was removed in `v0.50+`, so there is no longer a need to register a `legacyAminoCodec`. To replace the amino codec, Amino protobuf annotations are used to provide information to the amino codec on how to encode and decode protobuf messages.
:::note
Amino annotations are only used for backwards compatibility with amino. New modules are not required to use amino annotations.
:::
The below annotations are used to provide information to the amino codec on how to encode and decode protobuf messages in a backwards compatible manner.
### Name
Name specifies the amino name that shows up for users so they can see which message they are signing.
```proto
option (amino.name) = "cosmos-sdk/BaseAccount";
```
```proto reference
https://github.com/cosmos/cosmos-sdk/blob/e8f28bf5db18b8d6b7e0d94b542ce4cf48fed9d6/proto/cosmos/bank/v1beta1/tx.proto#L41
```
### Field_Name
Field name specifies the amino name that shows up for users so they can see which field they are signing.
```proto
uint64 height = 1 [(amino.field_name) = "public_key"];
```
```proto reference
https://github.com/cosmos/cosmos-sdk/blob/e8f28bf5db18b8d6b7e0d94b542ce4cf48fed9d6/proto/cosmos/distribution/v1beta1/distribution.proto#L166
```
### Dont_OmitEmpty
`dont_omitempty` specifies that the field should not be omitted when encoding to amino.
```proto
repeated cosmos.base.v1beta1.Coin amount = 3 [(amino.dont_omitempty) = true];
```
```proto reference
https://github.com/cosmos/cosmos-sdk/blob/e8f28bf5db18b8d6b7e0d94b542ce4cf48fed9d6/proto/cosmos/bank/v1beta1/bank.proto#L56
```
### Encoding
Encoding instructs the amino json marshaler how to encode certain fields that may differ from the standard encoding behaviour. The most common example of this is how `repeated cosmos.base.v1beta1.Coin` is encoded when using the amino json encoding format. The `legacy_coins` option tells the json marshaler [how to encode a null slice](https://github.com/cosmos/cosmos-sdk/blob/e8f28bf5db18b8d6b7e0d94b542ce4cf48fed9d6/x/tx/signing/aminojson/json_marshal.go#L65) of `cosmos.base.v1beta1.Coin`.
```proto
(amino.encoding) = "legacy_coins",
```
```proto reference
https://github.com/cosmos/cosmos-sdk/blob/e8f28bf5db18b8d6b7e0d94b542ce4cf48fed9d6/proto/cosmos/bank/v1beta1/genesis.proto#L23
```
Another example is a protobuf `bytes` that contains a valid JSON document.
The `inline_json` option tells the json marshaler to embed the JSON bytes into the wrapping document without escaping.
```proto
(amino.encoding) = "inline_json",
```
E.g. the bytes containing `{"foo":123}` in the `envelope` field would lead to the following JSON:
```json
{
"envelope": {
"foo": 123
}
}
```
If the bytes are not valid JSON, this produces broken JSON documents. Thus a JSON validity check needs to be in place at some point in the process.

627
.github/aider/guides/cosmos-rfc.md vendored Normal file
View File

@ -0,0 +1,627 @@
# RFC 004: Account System Refactor
## Status
- Draft v2 (May 2023)
## Current Limitations
1. **Account Representation**: Limited by `google.protobuf.Any` encapsulation and basic authentication methods
2. **Interface Constraints**: Lacks support for advanced functionalities like vesting and complex auth systems
3. **Implementation Rigidity**: Poor differentiation between account types (e.g., `ModuleAccount`)
4. **Authorization System**: Basic `x/auth` module with limited scope beyond `x/bank` functionality
5. **Dependency Issues**: Cyclic dependencies between modules (e.g., `x/auth` → `x/bank` for vesting)
## Proposal
This proposal aims to transform the way accounts are managed within the Cosmos SDK by introducing significant changes to
their structure and functionality.
### Rethinking Account Representation and Business Logic
Instead of representing accounts as simple `google.protobuf.Any` structures stored in state with no business logic
attached, this proposal suggests a more sophisticated account representation that is closer to module entities.
In fact, accounts should be able to receive messages and process them in the same way modules do, and be capable of storing
state in an isolated (prefixed) portion of state belonging only to them, in the same way as modules do.
### Account Message Reception
We propose that accounts should be able to receive messages in the same way modules can, allowing them to manage their
own state modifications without relying on other modules. This change would enable more advanced account functionality, such as the
`VestingAccount` example, where the `x/bank` module previously needed to change the vesting state by casting the abstracted
account to `VestingAccount` and triggering the `TrackDelegation` call. Accounts are already capable of sending messages when
a state transition, originating from a transaction, is executed.
When accounts receive messages, they will be able to identify the sender of the message and decide how to process the
state transition, if at all.
### Consequences
These changes would have significant implications for the Cosmos SDK, resulting in a system of actors that are equal from
the runtime perspective. The runtime would only be responsible for propagating messages between actors and would not
manage the authorization system. Instead, actors would manage their own authorizations. For instance, there would be no
need for the `x/auth` module to manage minting or burning of coins permissions, as it would fall within the scope of the
`x/bank` module.
The key difference between accounts and modules would lie in the origin of the message (state transition). Accounts
(ExternallyOwnedAccount), which have credentials (e.g., a public/private key pairing), originate state transitions from
transactions. In contrast, module state transitions do not have authentication credentials backing them and can be
caused by two factors: either as a consequence of a state transition coming from a transaction or triggered by a scheduler
(e.g., the runtime's Begin/EndBlock).
By implementing these proposed changes, the Cosmos SDK will benefit from a more extensible, versatile, and efficient account
management system that is better suited to address the requirements of the Cosmos ecosystem.
#### Standardization
With `x/accounts` allowing a modular API, there is a need to standardize accounts and the interfaces that wallets and other clients should expect. For this reason we will be using the [`CIP` repo](https://github.com/cosmos/cips) to standardize interfaces so that wallets know what to expect when interacting with accounts.
## Implementation
### Account Definition
We define the new `Account` type, which is what an account needs to implement to be treated as such.
An `Account` type is defined at the app level, so it cannot be dynamically loaded while the chain is running without upgrading the
node code, unless we create something like a `CosmWasmAccount`, which is an account backed by an `x/wasm` contract.
```go
// Account is what the developer implements to define an account.
type Account[InitMsg proto.Message] interface {
    // Init is the function that initialises an account instance of a given kind.
    // InitMsg is used to initialise the initial state of an account.
    Init(ctx *Context, msg InitMsg) error
    // RegisterExecuteHandlers registers an account's execution messages.
    RegisterExecuteHandlers(executeRouter *ExecuteRouter)
    // RegisterQueryHandlers registers an account's query messages.
    RegisterQueryHandlers(queryRouter *QueryRouter)
    // RegisterMigrationHandlers registers an account's migration messages.
    RegisterMigrationHandlers(migrationRouter *MigrationRouter)
}
```
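To make the shape of this interface concrete, below is a minimal, hypothetical sketch of an account kind implementing it. The `MsgInit`/`MsgIncrement` message types, the `RegisterHandler` helper, and the `getCounter`/`setCounter` state accessors are illustrative assumptions, not APIs defined by this RFC:
```go
// CounterAccount is a hypothetical account kind that stores a single counter
// in the account's own namespaced state.
type CounterAccount struct{}

func (a CounterAccount) Init(ctx *Context, msg *MsgInit) error {
    // setCounter is an illustrative helper writing to the account's KVStore.
    return setCounter(ctx, msg.InitialValue)
}

func (a CounterAccount) RegisterExecuteHandlers(executeRouter *ExecuteRouter) {
    // RegisterHandler is an assumed helper that registers a typed handler.
    RegisterHandler(executeRouter, func(ctx *Context, msg *MsgIncrement) (*MsgIncrementResponse, error) {
        v, err := getCounter(ctx)
        if err != nil {
            return nil, err
        }
        if err := setCounter(ctx, v+1); err != nil {
            return nil, err
        }
        return &MsgIncrementResponse{NewValue: v + 1}, nil
    })
}

func (a CounterAccount) RegisterQueryHandlers(queryRouter *QueryRouter)             {}
func (a CounterAccount) RegisterMigrationHandlers(migrationRouter *MigrationRouter) {}
```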
### The InternalAccount definition
The public `Account` interface implementation is then converted by the runtime into an `InternalAccount` implementation,
which contains all the information and business logic needed to operate the account.
```go
type Schema struct {
    state   StateSchema    // represents the state of an account
    init    InitSchema     // represents the init msg schema
    exec    ExecSchema     // represents the multiple execution msg schemas, containing also responses
    query   QuerySchema    // represents the multiple query msg schemas, containing also responses
    migrate *MigrateSchema // represents the multiple migrate msg schemas, containing also responses, it's optional
}

type InternalAccount struct {
    init    func(ctx *Context, msg proto.Message) (*InitResponse, error)
    execute func(ctx *Context, msg proto.Message) (*ExecuteResponse, error)
    query   func(ctx *Context, msg proto.Message) (proto.Message, error)
    schema  func() *Schema
    migrate func(ctx *Context, msg proto.Message) (*MigrateResponse, error)
}
```
This is an internal view of the account as intended by the system. It is not meant to be what developers implement. An
example implementation of the `InternalAccount` type can be found in [this](https://github.com/testinginprod/accounts-poc/blob/main/examples/recover/recover.go)
example of an account whose credentials can be recovered. In fact, even if the `Internal` implementation is untyped (with
respect to `proto.Message`), the concrete implementation is fully typed.
During any of the execution methods of `InternalAccount`, `schema` excluded, the account is given a `Context` which provides:
- A namespaced `KVStore` for the account, which isolates the account state from others (NOTE: no `store keys` needed,
the account address serves as `store key`).
- Information regarding itself (its address)
- Information regarding the sender.
- ...
#### Init
Init defines the entrypoint that allows for a new account instance of a given kind to be initialised.
The account is passed some opaque protobuf message which is then interpreted and contains the instructions that
constitute the initial state of an account once it is deployed.
An `Account` code can be deployed multiple times through the `Init` function, similar to how a `CosmWasm` contract code
can be deployed (Instantiated) multiple times.
#### Execute
Execute defines the entrypoint that allows an `Account` to process a state transition. The account can then decide how to
process the state transition based on the message provided and the sender of the transition.
#### Query
Query defines a read-only entrypoint that provides a stable interface that links an account with its state. The reasons
`Query` is still preferred as an addition to raw state reflection are to:
- Provide a stable interface for querying (state can be optimised and change more frequently than a query)
- Provide a way to define an account `Interface` with respect to its `Read/Write` paths.
- Provide a way to query information that cannot be processed from raw state reflection, ex: compute information from lazy
state that has not yet been concretely processed (e.g., balances with respect to lazy inputs/outputs)
#### Schema
Schema provides the definition of an account from an API perspective, and it's the only thing that should be taken into account
when interacting with an account from another account or module. For example, an account is an `authz-interface` account if
it has the following message in its execution messages: `MsgProxyStateTransition{ state_transition: google.protobuf.Any }`.
#### Migrate
Migrate defines the entrypoint that allows an `Account` to migrate its state from a previous version to a new one. Migrations
can be initiated only by the account itself; concretely, this means that the migrate action sender can only be the account address
itself. If the account wants to allow another address to migrate it on its behalf, it could create an execution message
that makes the account migrate itself.
### x/accounts module
In order to create accounts we define a new module, `x/accounts`. Note that `x/accounts` deploys accounts with no authentication
credentials attached, which means no account action can be initiated from a transaction; we will later explore how the
`x/authn` module uses `x/accounts` to deploy authenticated accounts.
This also has another important implication: account addresses are now fully decoupled from the authentication mechanism,
which in turn makes off-chain operations a little more complex, as the chain becomes the real link between account identifier
and credentials.
We could also introduce a way to deterministically compute the account address.
Note, from the transaction point of view, the `init_message` and `execute_message` are opaque `google.protobuf.Any`.
The module protobuf definitions for `x/accounts` are the following:
```protobuf
// Msg defines the Msg service.
service Msg {
  rpc Deploy(MsgDeploy) returns (MsgDeployResponse);
  rpc Execute(MsgExecute) returns (MsgExecuteResponse);
  rpc Migrate(MsgMigrate) returns (MsgMigrateResponse);
}

message MsgDeploy {
  string sender = 1;
  string kind = 2;
  google.protobuf.Any init_message = 3;
  repeated google.protobuf.Any authorize_messages = 4 [(gogoproto.nullable) = false];
}

message MsgDeployResponse {
  string address = 1;
  uint64 id = 2;
  google.protobuf.Any data = 3;
}

message MsgExecute {
  string sender = 1;
  string address = 2;
  google.protobuf.Any message = 3;
  repeated google.protobuf.Any authorize_messages = 4 [(gogoproto.nullable) = false];
}

message MsgExecuteResponse {
  google.protobuf.Any data = 1;
}

message MsgMigrate {
  string sender = 1;
  string new_account_kind = 2;
  google.protobuf.Any migrate_message = 3;
}

message MsgMigrateResponse {
  google.protobuf.Any data = 1;
}
```
#### MsgDeploy
Deploys a new instance of the given account `kind` with initial settings represented by the `init_message`, which is a `google.protobuf.Any`.
Of course, the `init_message` can be empty. A response is returned containing the account ID and humanised address, alongside any response
that the account instantiation might produce.
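As an illustration, a client might construct a `MsgDeploy` by packing its init message into an `Any` with the standard `anypb` helper; `MsgDeploy` and `MsgCounterInit` here stand in for generated Go types:
```go
import (
    "google.golang.org/protobuf/types/known/anypb"
)

// buildDeployMsg packs an opaque init message into a MsgDeploy.
func buildDeployMsg(sender string, init *MsgCounterInit) (*MsgDeploy, error) {
    // anypb.New wraps any proto.Message as a google.protobuf.Any
    anyInit, err := anypb.New(init)
    if err != nil {
        return nil, err
    }
    return &MsgDeploy{
        Sender:      sender,
        Kind:        "counter", // the registered account kind to instantiate
        InitMessage: anyInit,
    }, nil
}
```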
#### Address derivation
In order to decouple public keys from account addresses, we introduce a new address derivation mechanism; see the discussion of predictable address creation under Further discussion below.
#### MsgExecute
Sends a `StateTransition` execution request, where the state transition is represented by the `message`, which is a `google.protobuf.Any`.
The account can then decide whether to process it based on the `sender`.
#### MsgMigrate
Migrates an account to a new version of itself; the new version is represented by the `new_account_kind`. The state transition
can only be initiated by the account itself, which means that the `sender` must be the account address itself. During the migration,
the account's current state is given to the new version of the account, which then executes the migration logic using the `migrate_message`;
it might change state or not, it's up to the account to decide. The response contains any data that the account might produce
after the migration.
#### Authorize Messages
The `Deploy` and `Execute` messages have a field in common called `authorize_messages`; these are messages that the account
can execute on behalf of the sender. For example, in case an account is expecting some funds to be sent from the sender,
the sender can attach a `MsgSend` that the account can execute on the sender's behalf. These authorizations are short-lived:
they live only for the duration of the `Deploy` or `Execute` message execution, or until they are consumed.
An alternative would have been to add a `funds` field, as happens in CosmWasm, which guarantees the called contract that
the funds are available and sent in the context of the message execution. This would have been a simpler approach, but it would
have been limited to the context of `MsgSend` only, where the asset is `sdk.Coins`. The proposed generic way instead allows
the account to execute any message on behalf of the sender, which is more flexible; it could include NFT send execution, or
more complex things like `MsgMultiSend` or `MsgDelegate`, etc.
### Further discussion
#### Sub-accounts
We could provide a way to link accounts to other accounts. Maybe during deployment the sender could decide to link the
newly created account to its own, although there might be use-cases for which the deployer is different from the account
that needs to be linked; in this case a handshake protocol on linking would need to be defined.
#### Predictable address creation
We need to provide a way to create an account with a predictable address; this might serve a lot of purposes, like accounts
wanting to generate an address that:
- nobody else can claim besides the account used to generate the new account
- is predictable
For example:
```protobuf
message MsgDeployPredictable {
  string sender = 1;
  uint32 nonce = 2;
  ...
}
```
And then the address becomes `bechify(concat(sender, nonce))`.
`x/accounts` would still use the monotonically increasing sequence as account number.
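A sketch of one plausible realization of this derivation; hashing the preimage before bech32-encoding is an assumption on top of the RFC's `bechify(concat(sender, nonce))`:
```go
import (
    "crypto/sha256"
    "encoding/binary"

    "github.com/cosmos/cosmos-sdk/types/bech32"
)

// predictableAddress derives a deterministic address from (sender, nonce).
func predictableAddress(hrp string, sender []byte, nonce uint32) (string, error) {
    preimage := make([]byte, 0, len(sender)+4)
    preimage = append(preimage, sender...)
    preimage = binary.BigEndian.AppendUint32(preimage, nonce)
    // hash to fixed-length address material before encoding
    sum := sha256.Sum256(preimage)
    return bech32.ConvertAndEncode(hrp, sum[:])
}
```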
#### Joining Multiple Accounts
As developers are building new kinds of accounts, it becomes necessary to provide a default way to combine the
functionalities of different account types. This allows developers to avoid duplicating code and enables end-users to
create or migrate to accounts with multiple functionalities without requiring custom development.
To address this need, we propose the inclusion of a default account type called "MultiAccount". The MultiAccount type is
designed to merge the functionalities of other accounts by combining their execution, query, and migration APIs.
The account joining process would only fail in the case of API (intended as non-state Schema APIs) conflicts, ensuring
compatibility and consistency.
With the introduction of the MultiAccount type, users would have the option to either migrate their existing accounts to
a MultiAccount type or extend an existing MultiAccount with newer APIs. This flexibility empowers users to leverage
various account functionalities without compromising compatibility or resorting to manual code duplication.
The MultiAccount type serves as a standardized solution for combining different account functionalities within the
cosmos-sdk ecosystem. By adopting this approach, developers can streamline the development process and users can benefit
from a modular and extensible account system.
# ADR 071: Cryptography v2 - Multi-curve support
## Change log
- May 7th 2024: Initial Draft (Zondax AG: @raynaudoe @juliantoledano @jleni @educlerici-zondax @lucaslopezf)
- June 13th 2024: Add CometBFT implementation proposal (Zondax AG: @raynaudoe @juliantoledano @jleni @educlerici-zondax @lucaslopezf)
- July 2nd 2024: Split ADR proposal, add link to ADR in cosmos/crypto (Zondax AG: @raynaudoe @juliantoledano @jleni @educlerici-zondax @lucaslopezf)
## Status
DRAFT
## Abstract
This ADR proposes the refactoring of the existing `Keyring` and `cosmos-sdk/crypto` code to implement [ADR-001-CryptoProviders](https://github.com/cosmos/crypto/blob/main/docs/architecture/adr-001-crypto-provider.md).
For in-depth details of the `CryptoProviders` and their design, please refer to the ADR mentioned above.
## Introduction
The introduction of multi-curve support in the cosmos-sdk cryptographic package offers significant advantages. By not being restricted to a single cryptographic curve, developers can choose the most appropriate curve based on security, performance, and compatibility requirements. This flexibility enhances the application's ability to adapt to evolving security standards and optimizes performance for specific use cases, helping to future-proof the SDK's cryptographic capabilities.
The enhancements in this proposal not only render the ["Keyring ADR"](https://github.com/cosmos/cosmos-sdk/issues/14940) obsolete, but also encompass its key aspects, replacing it with a more flexible and comprehensive approach. Furthermore, the gRPC service proposed in the mentioned ADR can be easily implemented as a specialized `CryptoProvider`.
### Glossary
1. **Interface**: In the context of this document, "interface" refers to Go's interface.
2. **Module**: In this document, "module" refers to a Go module.
3. **Package**: In the context of Go, a "package" refers to a unit of code organization.
## Context
In order to fully understand the need for changes and the proposed improvements, it's crucial to consider the current state of affairs:
- The Cosmos SDK currently lacks a comprehensive ADR for the cryptographic package.
- If a blockchain project requires a cryptographic curve that is not supported by the current SDK, the most likely scenario is that they will need to fork the SDK repository and make modifications. These modifications could potentially make the fork incompatible with future updates from the upstream SDK, complicating maintenance and integration.
- Type leakage of specific crypto data types exposes backward compatibility and extensibility challenges.
- The demand for a more flexible and extensible approach to cryptography and address management is high.
- Architectural changes are necessary to resolve many of the currently open issues related to new curve support.
- There is a current trend towards modularity in the Interchain stack (e.g., runtime modules).
- Security implications are a critical consideration during the redesign work.
## Objectives
The key objectives for this proposal are:
- Leverage `CryptoProviders`: Utilize them as APIs for cryptographic tools, ensuring modularity, flexibility, and ease of integration.
- Developer-Centric Approach: Prioritize clear, intuitive interfaces and best-practice design principles.
- Quality Assurance: Enhance test coverage and improve testing methodologies to ensure the robustness and reliability of the module.
## Technical Goals
New Keyring:
- Design a new `Keyring` interface with a modular backend injection system to support hardware devices and cloud-based HSMs. This feature is optional and tied to complexity; if it proves too complex, it will be deferred to a future release as an enhancement.
## Proposed architecture
### Components
The main components to be used will be the same as those found in the [ADR-001](https://github.com/cosmos/crypto/blob/main/docs/architecture/adr-001-crypto-provider.md#components).
#### Storage and persistence
The storage and persistence layer is tasked with storing `CryptoProvider`s. Specifically, this layer must:
- Securely store the crypto provider's associated private key (only if stored locally, otherwise a reference to the private key will be stored instead).
- Store the [`ProviderMetadata`](https://github.com/cosmos/crypto/blob/main/docs/architecture/adr-001-crypto-provider.md#metadata) struct which contains the data that distinguishes that provider.
The purpose of this layer is to ensure that upon retrieval of the persisted data, we can access the provider's type, version, and specific configuration (which varies based on the provider type). This information will subsequently be utilized to initialize the appropriate factory, as detailed in the following section on the factory pattern.
The storage proposal involves using a modified version of the [Record](https://github.com/cosmos/cosmos-sdk/blob/main/proto/cosmos/crypto/keyring/v1/record.proto) struct, which is already defined in **Keyring/v1**. Additionally, we propose utilizing the existing keyring backends (keychain, filesystem, memory, etc.) to store these `Record`s in the same manner as the current **Keyring/v1**.
_Note: This approach will facilitate a smoother migration path from the current Keyring/v1 to the proposed architecture._
Below is the proposed protobuf message to be included in the modified `record.proto` file.
##### Protobuf message structure
The [record.proto](https://github.com/cosmos/cosmos-sdk/blob/main/proto/cosmos/crypto/keyring/v1/record.proto) file will be modified to include the `CryptoProvider` message as an optional field as follows.
```protobuf
// record.proto
message Record {
  string name = 1;
  google.protobuf.Any pub_key = 2;

  oneof item {
    Local local = 3;
    Ledger ledger = 4;
    Multi multi = 5;
    Offline offline = 6;
    CryptoProvider crypto_provider = 7; // <- New
  }

  message Local {
    google.protobuf.Any priv_key = 1;
  }

  message Ledger {
    hd.v1.BIP44Params path = 1;
  }

  message Multi {}

  message Offline {}
}
```
##### Creating and loading a `CryptoProvider`
For creating providers, we propose a _factory pattern_ and a _registry_ for these builders. Examples of these
patterns can be found [here](https://github.com/cosmos/crypto/blob/main/docs/architecture/adr-001-crypto-provider.md#illustrative-code-snippets)
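As an illustration, such a factory registry could take the following minimal shape; the `ProviderMetadata.Type` field and the function signatures are assumptions for this sketch:
```go
import "fmt"

// ProviderBuilder builds a CryptoProvider from its persisted metadata.
type ProviderBuilder func(metadata ProviderMetadata) (CryptoProvider, error)

// providerRegistry maps a provider type (e.g. "ledger", "local-secp256k1")
// to the factory that can rehydrate it from storage.
var providerRegistry = map[string]ProviderBuilder{}

// RegisterProvider is called at application wiring time, once per provider type.
func RegisterProvider(providerType string, builder ProviderBuilder) {
    providerRegistry[providerType] = builder
}

// BuildProvider selects the factory matching the persisted metadata and runs it.
func BuildProvider(metadata ProviderMetadata) (CryptoProvider, error) {
    builder, ok := providerRegistry[metadata.Type]
    if !ok {
        return nil, fmt.Errorf("unknown crypto provider type: %s", metadata.Type)
    }
    return builder(metadata)
}
```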
##### Keyring
The new `Keyring` interface will serve as a central hub for managing and fetching `CryptoProviders`. To ensure a smoother migration path, the new Keyring will be backward compatible with the previous version. Since this will be the main API from which applications will obtain their `CryptoProvider` instances, the proposal is to extend the Keyring interface to include the methods:
```go
type KeyringV2 interface {
    // methods from Keyring/v1

    // ListCryptoProviders returns a list of all the stored CryptoProvider metadata.
    ListCryptoProviders() ([]ProviderMetadata, error)

    // GetCryptoProvider retrieves a specific CryptoProvider by its id.
    GetCryptoProvider(id string) (CryptoProvider, error)
}
```
_Note_: Methods to obtain a provider from a public key or other means that make it easier to load the desired provider can be added.
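For example, signing with a stored provider might look like the following sketch; the `GetSigner` and `Sign` methods are assumed from the ADR-001 `CryptoProvider` design and may differ in the final API:
```go
// signWithProvider fetches a stored CryptoProvider by id and signs msg with it.
func signWithProvider(kr KeyringV2, providerID string, msg []byte) ([]byte, error) {
    provider, err := kr.GetCryptoProvider(providerID)
    if err != nil {
        return nil, err
    }
    signer, err := provider.GetSigner() // assumed accessor per ADR-001
    if err != nil {
        return nil, err
    }
    return signer.Sign(msg) // assumed signature; the real API may take options
}
```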
##### Special use case: remote signers
It's important to note that the `CryptoProvider` interface is versatile enough to be implemented as a remote signer. This capability allows for the integration of remote cryptographic operations, which can be particularly useful in distributed or cloud-based environments where local cryptographic resources are limited or need to be managed centrally.
## Alternatives
It is important to note that all the code presented in this document is not in its final form and could be subject to changes at the time of implementation. The examples and implementations discussed should be interpreted as alternatives, providing a conceptual framework rather than definitive solutions. This flexibility allows for adjustments based on further insights, technical evaluations, or changing requirements as development progresses.
## Decision
We will:
- Leverage crypto providers
- Refactor the module structure as described above.
- Define types and interfaces as the code attached.
- Refactor existing code into new structure and interfaces.
- Implement Unit Tests to ensure no backward compatibility issues.
## Consequences
### Impact on the SDK codebase
We can divide the impact of this ADR into two main categories: state machine code and client related code.
#### Client
The major impact will be on the client side, where the current `Keyring` interface will be replaced by the new `KeyringV2` interface. At first, the impact will be low since `CryptoProvider` is an optional field in the `Record` message, so there's no mandatory requirement for migrating to this new concept right away. This allows a progressive transition where the risks of breaking changes or regressions are minimized.
#### State Machine
The impact on the state machine code will be minimal; the modules affected (at the time of writing this ADR)
are the `x/accounts` module, specifically the `Authenticate` function, and the `x/auth/ante` module. These will need to be adapted to use a `CryptoProvider` service to make use of the `Verifier` instance.
Worth mentioning that there's also the alternative of using `Verifier` instances in a standalone fashion (see note below).
The specific way to adapt these modules will be deeply analyzed and decided at implementation time of this ADR.
_Note_: All cryptographic tools (hashers, verifiers, signers, etc.) will continue to be available as standalone packages that can be imported and utilized directly without the need for a `CryptoProvider` instance. However, the `CryptoProvider` is the recommended method for using these tools as it offers a more secure way to handle sensitive data, enhanced modularity, and the ability to store configurations and metadata within the `CryptoProvider` definition.
### Backwards Compatibility
The proposed migration path is similar to what the cosmos-sdk has done in the past. To ensure a smooth transition, the following steps will be taken:
Once ADR-001 is implemented with a stable release:
- Deprecate the old crypto package. The old crypto package will still be usable, but it will be marked as deprecated and users can opt to use the new package.
- Migrate the codebase to use the new cosmos/crypto package and remove the old crypto one.
### Positive
- Single place of truth
- Easier to use interfaces
- Easier to extend
- Unit test for each crypto package
- Greater maintainability
- Incentivize addition of implementations instead of forks
- Decoupling behavior from implementation
- Sanitization of code
### Negative
- It will involve an effort to adapt existing code.
- It will require attention to detail and auditing.
### Neutral
- It will involve extensive testing.
## Test Cases
- The code will be unit tested to ensure a high code coverage
- There should be integration tests around Keyring and CryptoProviders.
# ADR-71 Bank V2
## Status
DRAFT
## Changelog
- 2024-05-08: Initial Draft (@samricotta, @julienrbrt)
## Abstract
The primary objective of refactoring the bank module is to simplify and enhance the functionality of the Cosmos SDK. Over time the bank module has been burdened with numerous responsibilities including transaction handling, account restrictions, delegation counting, and the minting and burning of coins.
In addition to the above, the bank module is currently too rigid and handles too many tasks, so this proposal aims to streamline the module by focusing on core functions `Send`, `Mint`, and `Burn`.
Currently, the module is split across different keepers with scattered and duplicated functionality (four send functions, for instance).
Additionally, the integration of the token factory into the bank module allows for standardization, and better integration within the core modules.
This rewrite will reduce complexity and enhance the efficiency and UX of the bank module.
## Context
The current implementation of the bank module is characterised by its handling of a broad array of functions, leading to significant complexity in using and extending the bank module.
These issues have underscored the need for a refactoring strategy that simplifies the module's architecture and focuses on its most essential operations.
Additionally, there is an overlap in functionality with a Token Factory module, which could be integrated to streamline operations.
## Decision
**Permission Tightening**: Access to the module can be restricted to selected denominations only, ensuring that it operates within designated boundaries and does not exceed its intended scope. Currently, the permissions allow all denoms, so this should be changed. Send restrictions functionality will be maintained.
**Simplification of Logic**: The bank module will focus on core functionalities `Send`, `Mint`, and `Burn`. This refinement aims to streamline the architecture, enhancing both maintainability and performance.
**Integration of Token Factory**: The Token Factory will be merged into the bank module. This consolidation of related functionalities aims to reduce redundancy and enhance coherence within the system. Migrations functions will be provided for migrating from Osmosis' Token Factory module to bank/v2.
**Legacy Support**: A legacy wrapper will be implemented to ensure compatibility with about 90% of existing functions. This measure will facilitate a smooth transition while keeping older systems functional.
**Denom Implementation**: An asset interface will be added to standardise interactions such as transfers, balance inquiries, minting, and burning across different tokens. This will allow the bank module to support arbitrary asset types, enabling developers to implement custom, ERC20-like denominations.
For example, currently if a team would like to extend the transfer method, the changes would apply universally, affecting all denoms. With the proposed asset interface, teams can customise or extend the transfer method specifically for their own tokens without impacting others.
These improvements are expected to enhance the flexibility of the bank module, allowing for the creation of custom tokens similar to ERC20 standards and assets backed by CosmWasm (CW) contracts. The integration efforts will also aim to unify CW20 with bank coins across the Cosmos chains.
Example of denom interface:
```go
type AssetInterface interface {
    Transfer(ctx sdk.Context, from sdk.AccAddress, to sdk.AccAddress, amount sdk.Coin) error
    Mint(ctx sdk.Context, to sdk.AccAddress, amount sdk.Coin) error
    Burn(ctx sdk.Context, from sdk.AccAddress, amount sdk.Coin) error
    QueryBalance(ctx sdk.Context, account sdk.AccAddress) (sdk.Coin, error)
}
```
Overview of flow (a dispatch sketch follows the list):
1. Alice initiates a transfer by entering Bob's address and the amount (100 ATOM)
2. The Bank module verifies that the ATOM token implements the `AssetInterface` by querying the `ATOM_Denom_Account`, which is an `x/account` denom account.
3. The Bank module executes the transfer by subtracting 100 ATOM from Alice's balance and adding 100 ATOM to Bob's balance.
4. The Bank module calls the Transfer method on the `ATOM_Denom_Account`. The Transfer method, defined in the `AssetInterface`, handles the logic to subtract 100 ATOM from Alice's balance and add 100 ATOM to Bob's balance.
5. The Bank module updates the chain and returns the new balances.
6. Both Alice and Bob successfully receive the updated balances.
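A keeper-level sketch of the dispatch in step 4, assuming a hypothetical `resolveAsset` lookup of the denom's account and a `defaultTransfer` fallback:
```go
// Transfer resolves the denom's AssetInterface implementation and delegates to it.
func (k Keeper) Transfer(ctx sdk.Context, from, to sdk.AccAddress, amount sdk.Coin) error {
    // resolveAsset is a hypothetical lookup of the denom's x/account-backed asset.
    asset, err := k.resolveAsset(ctx, amount.Denom)
    if err != nil {
        // no custom asset account for this denom: fall back to the default
        // bank implementation
        return k.defaultTransfer(ctx, from, to, amount)
    }
    return asset.Transfer(ctx, from, to, amount)
}
```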
## Migration Plans
Bank is a widely used module, so a v2 needs to be thought through thoroughly. In order to not force all dependencies to immediately migrate to bank/v2, the same _upgrading_ path will be taken as for the `gov` module.
This means `cosmossdk.io/bank` will stay one module and there won't be a new `cosmossdk.io/bank/v2` go module. Instead the bank protos will be versioned from `v1beta1` (current bank) to `v2`.
Bank `v1beta1` endpoints will use the new bank v2 implementation for maximum backward compatibility.
The bank `v1beta1` keepers will be deprecated and potentially removed in the future, but their proto and message definitions will remain.
Additionally, as bank plans to integrate token factory, migrations functions will be provided to migrate from Osmosis token factory implementation (most widely used implementation) to the new bank/v2 token factory.
## Consequences
### Positive
- Simplified interaction with bank APIs
- Backward compatible changes (no contracts or apis broken)
- Optional migration (note: bank `v1beta1` won't get any new feature after bank `v2` release)
### Neutral
- Asset implementation not available cross-chain (IBC-ed custom assets should possibly fall back to the default implementation)
- Many assets may slow down bank balances requests
### Negative
- Temporarily duplicated functionality, as bank `v1beta1` and `v2` live alongside each other
- Difficulty in ever completely removing bank `v1beta1`
## References
- Current bank module implementation: https://github.com/cosmos/cosmos-sdk/blob/v0.50.6/x/bank/keeper/keeper.go#L22-L53
- Osmosis token factory: https://github.com/osmosis-labs/osmosis/tree/v25.0.0/x/tokenfactory/keeper

685
.github/aider/guides/cosmos-sdk.md vendored Normal file
View File

@ -0,0 +1,685 @@
# Cosmos SDK Core Components
## Overview
The Cosmos SDK is a framework for building secure blockchain applications on CometBFT. It provides:
- ABCI implementation in Go
- Multi-store persistence layer
- Transaction routing system
## Transaction Flow
1. CometBFT consensus delivers transaction bytes
2. SDK decodes transactions and extracts messages
3. Messages routed to appropriate modules
4. State changes committed to stores
```mermaid
graph TD
A[CometBFT] -->|Tx Bytes| B[SDK Decode]
B -->|Messages| C[Module Router]
C -->|State Changes| D[Multi-store]
```
## `baseapp`
`baseapp` is the boilerplate implementation of a Cosmos SDK application. It comes with an implementation of the ABCI to handle the connection with the underlying consensus engine. Typically, a Cosmos SDK application extends `baseapp` by embedding it in [`app.go`](../beginner/00-app-anatomy.md#core-application-file).
Here is an example of this from `simapp`, the Cosmos SDK demonstration app:
```go reference
https://github.com/cosmos/cosmos-sdk/blob/v0.52.0-beta.1/simapp/app.go#L145-L186
```
The goal of `baseapp` is to provide a secure interface between the store and the extensible state machine while defining as little about the state machine as possible (staying true to the ABCI).
For more on `baseapp`, please click [here](../advanced/00-baseapp.md).
## Multistore
The Cosmos SDK provides a [`multistore`](../advanced/04-store.md#multistore) for persisting state. The multistore allows developers to declare any number of [`KVStores`](../advanced/04-store.md#base-layer-kvstores). These `KVStores` only accept the `[]byte` type as value and therefore any custom structure needs to be marshalled using [a codec](../advanced/05-encoding.md) before being stored.
The multistore abstraction is used to divide the state in distinct compartments, each managed by its own module. For more on the multistore, click [here](../advanced/04-store.md#multistore).
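As a hedged sketch of what this looks like in practice (the `types.Profile` message and the `k.cdc` and `k.storeKey` fields are assumptions, not a specific module's code):
```go
// SetProfile writes a custom struct to the module's KVStore. Because KVStores
// only accept []byte values, the struct is marshalled with the module codec first.
func (k Keeper) SetProfile(ctx sdk.Context, addr sdk.AccAddress, profile types.Profile) error {
	bz, err := k.cdc.Marshal(&profile) // the codec turns the struct into []byte
	if err != nil {
		return err
	}
	store := ctx.KVStore(k.storeKey) // the module's compartment of the multistore
	store.Set(addr.Bytes(), bz)
	return nil
}
```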
## Modules
The power of the Cosmos SDK lies in its modularity. Cosmos SDK applications are built by aggregating a collection of interoperable modules. Each module defines a subset of the state and contains its own message/transaction processor, while the Cosmos SDK is responsible for routing each message to its respective module.
Here is a simplified view of how a transaction is processed by the application of each full-node when it is received in a valid block:
```mermaid
flowchart TD
A[Transaction relayed from the full-node's CometBFT engine to the node's application via DeliverTx] --> B[APPLICATION]
B -->|"Using baseapp's methods: Decode the Tx, extract and route the message(s)"| C[Message routed to the correct module to be processed]
C --> D1[AUTH MODULE]
C --> D2[BANK MODULE]
C --> D3[STAKING MODULE]
C --> D4[GOV MODULE]
D1 -->|Handle message, Update state| E["Return result to CometBFT (0=Ok, 1=Err)"]
D2 -->|Handle message, Update state| E["Return result to CometBFT (0=Ok, 1=Err)"]
D3 -->|Handle message, Update state| E["Return result to CometBFT (0=Ok, 1=Err)"]
D4 -->|Handle message, Update state| E["Return result to CometBFT (0=Ok, 1=Err)"]
```
Each module can be seen as a little state-machine. Developers need to define the subset of the state handled by the module, as well as custom message types that modify the state (_Note:_ `messages` are extracted from `transactions` by `baseapp`). In general, each module declares its own `KVStore` in the `multistore` to persist the subset of the state it defines. Most developers will need to access other 3rd party modules when building their own modules. Given that the Cosmos SDK is an open framework, some of the modules may be malicious, which means there is a need for security principles to reason about inter-module interactions. These principles are based on [object-capabilities](../advanced/10-ocap.md). In practice, this means that instead of having each module keep an access control list for other modules, each module implements special objects called `keepers` that can be passed to other modules to grant a pre-defined set of capabilities.
Cosmos SDK modules are defined in the `x/` folder of the Cosmos SDK. Some core modules include:
- `x/auth`: Used to manage accounts and signatures.
- `x/bank`: Used to enable tokens and token transfers.
- `x/staking` + `x/slashing`: Used to build Proof-of-Stake blockchains.
In addition to the already existing modules in `x/`, which anyone can use in their app, the Cosmos SDK lets you build your own custom modules. You can check an [example of that in the tutorial](https://tutorials.cosmos.network/).

# Keepers
:::note Synopsis
`Keeper`s refer to a Cosmos SDK abstraction whose role is to manage access to the subset of the state defined by various modules. `Keeper`s are module-specific, i.e. the subset of state defined by a module can only be accessed by a `keeper` defined in said module. If a module needs to access the subset of state defined by another module, a reference to the second module's internal `keeper` needs to be passed to the first one. This is done in `app.go` during the instantiation of module keepers.
:::
:::note Pre-requisite Readings
- [Introduction to Cosmos SDK Modules](./00-intro.md)
:::
## Motivation
The Cosmos SDK is a framework that makes it easy for developers to build complex decentralized applications from scratch, mainly by composing modules together. As the ecosystem of open-source modules for the Cosmos SDK expands, it will become increasingly likely that some of these modules contain vulnerabilities, as a result of the negligence or malice of their developer.
The Cosmos SDK adopts an [object-capabilities-based approach](https://docs.cosmos.network/main/learn/advanced/ocap#ocaps-in-practice) to help developers better protect their application from unwanted inter-module interactions, and `keeper`s are at the core of this approach. A `keeper` can be considered quite literally to be the gatekeeper of a module's store(s). Each store (typically an [`IAVL` Store](../../learn/advanced/04-store.md#iavl-store)) defined within a module comes with a `storeKey`, which grants unlimited access to it. The module's `keeper` holds this `storeKey` (which should otherwise remain unexposed), and defines [methods](#implementing-methods) for reading and writing to the store(s).
The core idea behind the object-capabilities approach is to only reveal what is necessary to get the work done. In practice, this means that instead of handling permissions of modules through access-control lists, module `keeper`s are passed a reference to the specific instance of the other modules' `keeper`s that they need to access (this is done in the [application's constructor function](../../learn/beginner/00-app-anatomy.md#constructor-function)). As a consequence, a module can only interact with the subset of state defined in another module via the methods exposed by the instance of the other module's `keeper`. This is a great way for developers to control the interactions that their own module can have with modules developed by external developers.
## Type Definition
`keeper`s are generally implemented in a `/keeper/keeper.go` file located in the module's folder. By convention, the type `keeper` of a module is simply named `Keeper` and usually follows the following structure:
```go
type Keeper struct {
// External keepers, if any
// Store key(s)
// codec
// authority
}
```
For example, here is the type definition of the `keeper` from the `staking` module:
```go reference
https://github.com/cosmos/cosmos-sdk/blob/v0.52.0-beta.1/x/staking/keeper/keeper.go#L54-L115
```
Let us go through the different parameters:
- An expected `keeper` is a `keeper` external to a module that is required by the internal `keeper` of said module. External `keeper`s are listed in the internal `keeper`'s type definition as interfaces. These interfaces are themselves defined in an `expected_keepers.go` file in the root of the module's folder. In this context, interfaces are used to reduce the number of dependencies, as well as to facilitate the maintenance of the module itself.
- `KVStoreService`s grant access to the store(s) of the [multistore](../../learn/advanced/04-store.md) managed by the module. They should always remain unexposed to external modules.
- `cdc` is the [codec](../../learn/advanced/05-encoding.md) used to marshal and unmarshal structs to/from `[]byte`. The `cdc` can be any of `codec.BinaryCodec`, `codec.JSONCodec` or `codec.Codec` based on your requirements. It can be either a proto or amino codec as long as they implement these interfaces.
- The authority listed is a module account or user account that has the right to change module level parameters. Previously this was handled by the param module, which has been deprecated.
Of course, it is possible to define different types of internal `keeper`s for the same module (e.g. a read-only `keeper`). Each type of `keeper` comes with its own constructor function, which is called from the [application's constructor function](../../learn/beginner/00-app-anatomy.md). This is where `keeper`s are instantiated, and where developers make sure to pass correct instances of modules' `keeper`s to other modules that require them.
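Putting these parameters together, here is a minimal sketch of a keeper and its constructor, assuming an illustrative `BankKeeper` expected-keeper interface (the field names are not taken from any specific module):
```go
package keeper

import (
	"context"

	"cosmossdk.io/core/store"
	"github.com/cosmos/cosmos-sdk/codec"
	sdk "github.com/cosmos/cosmos-sdk/types"
)

// BankKeeper is an expected keeper: only the methods this module needs,
// normally declared in the module's expected_keepers.go file.
type BankKeeper interface {
	SendCoins(ctx context.Context, from, to sdk.AccAddress, amt sdk.Coins) error
}

type Keeper struct {
	storeService store.KVStoreService // access to the module's store; never exposed
	cdc          codec.BinaryCodec    // marshals structs to/from []byte
	authority    string               // account allowed to update module parameters
	bankKeeper   BankKeeper           // external keeper, wired up in app.go
}

// NewKeeper is called from the application's constructor function.
func NewKeeper(storeService store.KVStoreService, cdc codec.BinaryCodec, authority string, bk BankKeeper) Keeper {
	return Keeper{
		storeService: storeService,
		cdc:          cdc,
		authority:    authority,
		bankKeeper:   bk,
	}
}
```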
## Implementing Methods
`Keeper`s primarily expose methods for business logic, as validity checks should have already been performed by the [`Msg` server](./03-msg-services.md) when `keeper`s' methods are called.
<!-- markdown-link-check-disable -->
State management is recommended to be done via [Collections](../packages/collections)
<!-- The above link is created via the script to generate docs -->
## State Management
In the Cosmos SDK, it is crucial to be methodical and selective when managing state within a module, as improper state management can lead to inefficiency, security risks, and scalability issues. Not all data belongs in the on-chain state; it's important to store only essential blockchain data that needs to be verified by consensus. Storing unnecessary information, especially client-side data, can bloat the state and slow down performance. Instead, developers should focus on using an off-chain database to handle supplementary data, extending the API as needed. This approach minimizes on-chain complexity, optimizes resource usage, and keeps the blockchain state lean and efficient, ensuring scalability and smooth operations.
The Cosmos SDK leverages Protocol Buffers (protobuf) for efficient state management, providing a well-structured, binary encoding format that ensures compatibility and performance across different modules. The SDK's recommended approach for managing state is through the [collections package](../packages/02-collections.md), which simplifies state handling by offering predefined data structures like maps and indexed sets, reducing the complexity of managing raw state data. While users can opt for custom encoding schemes if they need more flexibility or have specialized requirements, they should be aware that such custom implementations may not integrate seamlessly with indexers that decode state data on the fly. This could lead to challenges in data retrieval, querying, and interoperability, making protobuf a safer and more future-proof choice for most use cases.
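A minimal sketch of the collections approach, assuming a hypothetical balances map (the prefix, name, and key/value codecs are illustrative choices):
```go
package keeper

import (
	"cosmossdk.io/collections"
	"cosmossdk.io/core/store"
)

type Keeper struct {
	Schema   collections.Schema
	Balances collections.Map[string, uint64] // bech32 address -> amount
}

func NewKeeper(storeService store.KVStoreService) Keeper {
	sb := collections.NewSchemaBuilder(storeService)
	k := Keeper{
		Balances: collections.NewMap(sb, collections.NewPrefix(0), "balances",
			collections.StringKey, collections.Uint64Value),
	}
	schema, err := sb.Build()
	if err != nil {
		panic(err) // schema construction fails only on programmer error
	}
	k.Schema = schema
	return k
}
```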
# Folder Structure
:::note Synopsis
This document outlines the structure of Cosmos SDK modules. These ideas are meant to be applied as suggestions. Application developers are encouraged to improve upon and contribute to module structure and development design.
The required interface for a module is located in the module.go. Everything beyond this is suggestive.
:::
## Structure
A typical Cosmos SDK module can be structured as follows:
```shell
proto
└── {project_name}
    └── {module_name}
        └── {proto_version}
            ├── {module_name}.proto
            ├── genesis.proto
            ├── query.proto
            └── tx.proto
```
- `{module_name}.proto`: The module's common message type definitions.
- `genesis.proto`: The module's message type definitions related to genesis state.
- `query.proto`: The module's Query service and related message type definitions.
- `tx.proto`: The module's Msg service and related message type definitions.
```shell
x/{module_name}
├── client
│   ├── cli
│   │   ├── query.go
│   │   └── tx.go
│   └── testutil
│       ├── cli_test.go
│       └── suite.go
├── exported
│   └── exported.go
├── keeper
│   ├── genesis.go
│   ├── grpc_query.go
│   ├── hooks.go
│   ├── invariants.go
│   ├── keeper.go
│   ├── keys.go
│   ├── msg_server.go
│   └── querier.go
├── simulation
│   ├── decoder.go
│   ├── genesis.go
│   ├── operations.go
│   └── params.go
├── types
│   ├── {module_name}.pb.go
│   ├── codec.go
│   ├── errors.go
│   ├── events.go
│   ├── events.pb.go
│   ├── expected_keepers.go
│   ├── genesis.go
│   ├── genesis.pb.go
│   ├── keys.go
│   ├── msgs.go
│   ├── params.go
│   ├── query.pb.go
│   └── tx.pb.go
├── module.go
├── abci.go
├── autocli.go
├── depinject.go
└── README.md
```
- `client/`: The module's CLI client functionality implementation and the module's CLI testing suite.
- `exported/`: The module's exported types - typically interface types. If a module relies on keepers from another module, it is expected to receive the keepers as interface contracts through the `expected_keepers.go` file (see below) in order to avoid a direct dependency on the module implementing the keepers. However, these interface contracts can define methods that operate on and/or return types that are specific to the module that is implementing the keepers and this is where `exported/` comes into play. The interface types that are defined in `exported/` use canonical types, allowing for the module to receive the keepers as interface contracts through the `expected_keepers.go` file. This pattern allows for code to remain DRY and also alleviates import cycle chaos.
- `keeper/`: The module's `Keeper` and `MsgServer` implementation.
- `abci.go`: The module's `BeginBlocker` and `EndBlocker` implementations (this file is only required if `BeginBlocker` and/or `EndBlocker` need to be defined).
- `simulation/`: The module's [simulation](./14-simulator.md) package defines functions used by the blockchain simulator application (`simapp`).
- `README.md`: The module's specification documents outlining important concepts, state storage structure, and message and event type definitions. Learn more how to write module specs in the [spec guidelines](../spec/SPEC_MODULE.md).
- `types/`: includes type definitions for messages, events, and genesis state, including the type definitions generated by Protocol Buffers.
- `codec.go`: The module's registry methods for interface types.
- `errors.go`: The module's sentinel errors.
- `events.go`: The module's event types and constructors.
- `expected_keepers.go`: The module's [expected keeper](./06-keeper.md#type-definition) interfaces.
- `genesis.go`: The module's genesis state methods and helper functions.
- `keys.go`: The module's store keys and associated helper functions.
- `msgs.go`: The module's message type definitions and associated methods.
- `params.go`: The module's parameter type definitions and associated methods.
- `*.pb.go`: The module's type definitions generated by Protocol Buffers (as defined in the respective `*.proto` files above).
- The root directory includes the module's `AppModule` implementation.
- `autocli.go`: The module [autocli](https://docs.cosmos.network/main/core/autocli) options.
- `depinject.go`: The module [depinject](./15-depinject.md#type-definition) options.
> Note: although the above pattern is followed by most of the Cosmos SDK modules, there are some modules that don't follow this pattern. E.g. `x/group` and `x/nft` don't have a `types` folder; instead, all of the type definitions for messages, events, and genesis state live in the root directory and the module's `AppModule` implementation lives in the `module` folder.
---
sidebar_position: 1
---
# `Msg` Services
:::note Synopsis
A Protobuf `Msg` service processes [messages](./02-messages-and-queries.md#messages). Protobuf `Msg` services are specific to the module in which they are defined, and only process messages defined within the said module. They are called from `BaseApp` during [`FinalizeBlock`](../../learn/advanced/00-baseapp.md#finalizeblock).
:::
:::note Pre-requisite Readings
- [Module Manager](./01-module-manager.md)
- [Messages and Queries](./02-messages-and-queries.md)
:::
## Implementation of a module `Msg` service
Each module should define a Protobuf `Msg` service, which will be responsible for processing requests (implementing `sdk.Msg`) and returning responses.
As further described in [ADR 031](../architecture/adr-031-msg-service.md), this approach has the advantage of clearly specifying return types and generating server and client code.
Protobuf generates a `MsgServer` interface based on the definition of the `Msg` service. It is the role of the module developer to implement this interface, by implementing the state transition logic that should happen upon receipt of each `transaction.Msg`. As an example, here is the generated `MsgServer` interface for `x/bank`, which exposes two `transaction.Msg`s:
```go reference
https://github.com/cosmos/cosmos-sdk/blob/28fa3b8/x/bank/types/tx.pb.go#L564-L579
```
When possible, the existing module's [`Keeper`](./06-keeper.md) should implement `MsgServer`, otherwise a `msgServer` struct that embeds the `Keeper` can be created, typically in `./keeper/msg_server.go`:
```go reference
https://github.com/cosmos/cosmos-sdk/blob/28fa3b8/x/bank/keeper/msg_server.go#L16-L19
```
`msgServer` methods can retrieve auxiliary information or services via the environment, which is always located in the keeper:
Environment:
```go reference
https://github.com/cosmos/cosmos-sdk/blob/07151304e2ec6a185243d083f59a2d543253cb15/core/appmodule/v2/environment.go#L14-L29
```
Keeper Example:
```go reference
https://github.com/cosmos/cosmos-sdk/blob/07151304e2ec6a185243d083f59a2d543253cb15/x/bank/keeper/keeper.go#L56-L58
```
`transaction.Msg` processing usually follows these 3 steps:
### Validation
The message server must perform all validation required (both _stateful_ and _stateless_) to make sure the `message` is valid.
The `signer` is charged for the gas cost of this validation.
For example, a `msgServer` method for a `transfer` message should check that the sending account has enough funds to actually perform the transfer.
It is recommended to implement all validation checks in a separate function that passes state values as arguments. This implementation simplifies testing. As expected, expensive validation functions charge additional gas. Example:
```go
func ValidateMsgA(msg MsgA, now time.Time, gm GasMeter) error {
	if now.After(msg.Expire) {
		return sdkerrors.ErrInvalidRequest.Wrap("msg expired")
	}
	gm.ConsumeGas(1000, "signature verification")
	return signatureVerification(msg.Prover, msg.Data)
}
```
:::warning
Previously, the `ValidateBasic` method was used to perform simple and stateless validation checks.
This way of validating is deprecated, meaning the `msgServer` must now perform all validation checks.
:::
### State Transition
After the validation is successful, the `msgServer` method uses the [`keeper`](./06-keeper.md) functions to access the state and perform a state transition.
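Condensing the three steps into one handler, a hedged sketch (the `MsgTransfer` type, the `moveBalance` keeper method, and the event names are hypothetical; `sdkerrors` is `github.com/cosmos/cosmos-sdk/types/errors`):
```go
func (k msgServer) Transfer(goCtx context.Context, msg *types.MsgTransfer) (*types.MsgTransferResponse, error) {
	ctx := sdk.UnwrapSDKContext(goCtx)
	// 1. Validation: stateless and stateful checks, charged to the signer.
	if !msg.Amount.IsPositive() {
		return nil, sdkerrors.ErrInvalidRequest.Wrap("amount must be positive")
	}
	// 2. State transition through keeper functions.
	if err := k.moveBalance(ctx, msg.From, msg.To, msg.Amount); err != nil {
		return nil, err
	}
	// 3. Event emission (see the next section).
	ctx.EventManager().EmitEvent(sdk.NewEvent("transfer",
		sdk.NewAttribute("from", msg.From),
		sdk.NewAttribute("to", msg.To),
	))
	return &types.MsgTransferResponse{}, nil
}
```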
### Events
Before returning, `msgServer` methods generally emit one or more [events](../../learn/advanced/08-events.md) by using the `EventManager` held in `environment`.
There are two ways to emit events: typed events using protobuf, or arbitrary keys and values.
Typed Events:
```go
ctx.EventManager().EmitTypedEvent(
	&group.EventABC{Key1: Value1, Key2: Value2})
```
Arbitrary Events:
```go
ctx.EventManager().EmitEvent(
sdk.NewEvent(
eventType, // e.g. sdk.EventTypeMessage for a message, types.CustomEventType for a custom event defined in the module
sdk.NewAttribute(key1, value1),
sdk.NewAttribute(key2, value2),
),
)
```
These events are relayed back to the underlying consensus engine and can be used by service providers to implement services around the application. Click [here](../../learn/advanced/08-events.md) to learn more about events.
The invoked `msgServer` method returns a `proto.Message` response and an `error`. These return values are then wrapped into an `*sdk.Result` or an `error`:
```go reference
https://github.com/cosmos/cosmos-sdk/blob/v0.50.0-alpha.0/baseapp/msg_service_router.go#L160
```
This method takes care of marshaling the `res` parameter to protobuf and attaching any events on the `EventManager()` to the `sdk.Result`.
```protobuf reference
https://github.com/cosmos/cosmos-sdk/blob/v0.50.0-alpha.0/proto/cosmos/base/abci/v1beta1/abci.proto#L93-L113
```
This diagram shows a typical structure of a Protobuf `Msg` service, and how the message propagates through the module.
```mermaid
sequenceDiagram
participant User
participant baseApp
participant router
participant handler
participant msgServer
participant keeper
participant EventManager
User->>baseApp: Transaction Type<Tx>
baseApp->>router: Route(ctx, msgRoute)
router->>handler: handler
handler->>msgServer: Msg<Tx>(Context, Msg(..))
alt addresses invalid, denominations wrong, etc.
msgServer->>handler: error
handler->>router: error
router->>baseApp: result, error code
else
msgServer->>keeper: perform action, update context
keeper->>msgServer: results, error code
msgServer->>EventManager: Emit relevant events
msgServer->>msgServer: maybe wrap results in more structure
msgServer->>handler: result, error code
handler->>router: result, error code
router->>baseApp: result, error code
end
baseApp->>User: result, error code
```
## Telemetry
New [telemetry metrics](../../learn/advanced/09-telemetry.md) can be created from `msgServer` methods when handling messages.
This is an example from the `x/auth/vesting` module:
```go reference
https://github.com/cosmos/cosmos-sdk/blob/v0.50.0-alpha.0/x/auth/vesting/msg_server.go#L76-L88
```
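A hedged sketch of such an instrumented handler using the SDK's `telemetry` package (the message type and metric keys are hypothetical):
```go
import "github.com/cosmos/cosmos-sdk/telemetry"

func (k msgServer) CreateAccount(goCtx context.Context, msg *types.MsgCreateAccount) (*types.MsgCreateAccountResponse, error) {
	// Count how often this message type is handled; the keys form the metric name.
	defer telemetry.IncrCounter(1, types.ModuleName, "create_account")
	// ... validation and state transition ...
	return &types.MsgCreateAccountResponse{}, nil
}
```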
:::warning
Telemetry adds a performance overhead to the chain. It is recommended to only use this in critical paths.
:::
---
sidebar_position: 1
---
# Query Services
:::note Synopsis
A Protobuf Query service processes [`queries`](./02-messages-and-queries.md#queries). Query services are specific to the module in which they are defined, and only process `queries` defined within said module. They are called from `BaseApp`'s [`Query` method](../../learn/advanced/00-baseapp.md#query).
:::
:::note Pre-requisite Readings
- [Module Manager](./01-module-manager.md)
- [Messages and Queries](./02-messages-and-queries.md)
:::
## Implementation of a module query service
### gRPC Service
When defining a Protobuf `Query` service, a `QueryServer` interface is generated for each module with all the service methods:
```go
type QueryServer interface {
QueryBalance(context.Context, *QueryBalanceParams) (*types.Coin, error)
QueryAllBalances(context.Context, *QueryAllBalancesParams) (*QueryAllBalancesResponse, error)
}
```
These custom query methods should be implemented by a module's keeper, typically in `./keeper/grpc_query.go`. The first parameter of these methods is a generic `context.Context`. Therefore, the Cosmos SDK provides a function `sdk.UnwrapSDKContext` to retrieve the `sdk.Context` from the provided `context.Context`.
Here's an example implementation for the bank module:
```go reference
https://github.com/cosmos/cosmos-sdk/blob/v0.50.0-alpha.0/x/bank/keeper/grpc_query.go
```
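A trimmed-down sketch of such a handler (the request/response types and the `GetBalance` keeper method are assumptions; `status` and `codes` come from `google.golang.org/grpc`):
```go
func (k Keeper) Balance(goCtx context.Context, req *types.QueryBalanceRequest) (*types.QueryBalanceResponse, error) {
	if req == nil {
		return nil, status.Error(codes.InvalidArgument, "empty request")
	}
	ctx := sdk.UnwrapSDKContext(goCtx) // recover the sdk.Context from context.Context
	addr, err := sdk.AccAddressFromBech32(req.Address)
	if err != nil {
		return nil, err
	}
	balance := k.GetBalance(ctx, addr, req.Denom) // hypothetical keeper read
	return &types.QueryBalanceResponse{Balance: balance}, nil
}
```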
### Calling queries from the State Machine
The Cosmos SDK v0.47 introduces a new `cosmos.query.v1.module_query_safe` Protobuf annotation which is used to state that a query is safe to be called from within the state machine, for example:
- a Keeper's query function can be called from another module's Keeper,
- ADR-033 intermodule query calls,
- CosmWasm contracts can also directly interact with these queries.
If the `module_query_safe` annotation is set to `true`, it means:
- The query is deterministic: given a block height it will return the same response upon multiple calls, and doesn't introduce any state-machine breaking changes across SDK patch versions.
- Gas consumption never fluctuates across calls and across patch versions.
If you are a module developer and want to use `module_query_safe` annotation for your own query, you have to ensure the following things:
- the query is deterministic and won't introduce state-machine-breaking changes without coordinated upgrades
- it has its gas tracked, to avoid the attack vector where no gas is accounted for on potentially high-computation queries.
---
sidebar_position: 1
---
# Blockchain Architecture
## Introduction
Blockchain architecture is a complex topic that involves many different components. In this section, we will cover the main layers of a blockchain application built with the Cosmos SDK.
At its core, a blockchain is a replicated deterministic state machine. This document explores the various layers of blockchain architecture, focusing on the execution, settlement, consensus, data availability, and interoperability layers.
```mermaid
graph TD
A[Modular SDK Blockchain Architecture]
A --> B[Execution Layer]
A --> C[Settlement Layer]
A --> D[Consensus Layer]
D --> E[Data Availability Layer]
A --> F[Interoperability Layer]
```
## Layered Architecture
Understanding blockchain architecture through the lens of different layers helps in comprehending its complex functionalities. We will give a high-level overview of the execution layer, settlement layer, consensus layer, data availability layer, and interoperability layer.
## Execution Layer
The Execution Layer is where the blockchain processes and executes transactions. The state machine within the blockchain handles the execution of transaction logic. This is done by the blockchain itself, ensuring that every transaction follows the predefined rules and state transitions. When a transaction is submitted, the execution layer processes it, updates the state, and ensures that the output is deterministic and consistent across all nodes. In the context of the Cosmos SDK, this typically involves predefined modules and transaction types rather than general-purpose smart contracts, which are used in chains with CosmWasm.
### State machine
At its core, a blockchain is a [replicated deterministic state machine](https://en.wikipedia.org/wiki/State_machine_replication).
A state machine is a computer science concept whereby a machine can have multiple states, but only one at any given time. There is a `state`, which describes the current state of the system, and `transactions`, that trigger state transitions.
Given a state S and a transaction T, the state machine will return a new state S'.
```mermaid
flowchart LR
A[S]
B[S']
A -->|"apply(T)"| B
```
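Purely to build intuition (this is a toy, not SDK code), a deterministic `apply` over an account-balance state:
```go
// State maps account -> balance; Tx moves Amount from one account to another.
type State map[string]int64

type Tx struct {
	From, To string
	Amount   int64
}

// Apply is deterministic: the same (S, T) always yields the same S'.
func Apply(s State, t Tx) State {
	next := make(State, len(s))
	for acct, bal := range s {
		next[acct] = bal
	}
	next[t.From] -= t.Amount
	next[t.To] += t.Amount
	return next
}
```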
In practice, the transactions are bundled in blocks to make the process more efficient. Given a state S and a block of transactions B, the state machine will return a new state S'.
```mermaid
flowchart LR
A[S]
B[S']
A -->|"For each T in B: apply(T)"| B
```
In a blockchain context, the state machine is [deterministic](https://en.wikipedia.org/wiki/Deterministic_system). This means that if a node is started at a given state and replays the same sequence of transactions, it will always end up with the same final state.
The Cosmos SDK gives developers maximum flexibility to define the state of their application, transaction types and state transition functions. The process of building state machines with the Cosmos SDK will be described more in-depth in the following sections. But first, let us see how the state machine is replicated using various consensus engines, such as CometBFT.
## Settlement Layer
The Settlement Layer is responsible for finalising and recording transactions on the blockchain. This layer ensures that all transactions are accurately settled and immutable, providing a verifiable record of all activities on the blockchain. It is critical for maintaining the integrity and trustworthiness of the blockchain.
The settlement layer can be performed on the chain itself or it can be externalised, allowing for the possibility of plugging in a different settlement layer as needed. For example, if we were to use Rollkit and Celestia for our data availability and consensus, we could separate our settlement layer by introducing fraud or validity proofs. From there the settlement layer can create trust-minimised light clients, further enhancing security and efficiency. This process ensures that all transactions are accurately finalized and immutable, providing a verifiable record of all activities.
## Consensus Layer
The Consensus Layer ensures that all nodes in the network agree on the order and validity of transactions. This layer uses consensus algorithms like Byzantine Fault Tolerance (BFT) or Proof of Stake (PoS) to achieve agreement, even in the presence of malicious nodes. Consensus is crucial for maintaining the security and reliability of the blockchain.
CometBFT has long been the default consensus engine in the Cosmos SDK. In the most recent releases we have been moving away from this and allowing users to plug and play their own consensus engines. This is a big step forward for the Cosmos SDK as it allows for more flexibility and customisation. Other consensus engine options include, for example, Rollkit with Celestia's Data Availability Layer.
Here is an example of how the consensus layer works with CometBFT in the context of the Cosmos SDK:
### CometBFT
Thanks to the Cosmos SDK, developers just have to define the state machine, and [_CometBFT_](https://docs.cometbft.com/v1.0/explanation/introduction/) will handle replication over the network for them.
```mermaid
flowchart TD
subgraph Blockchain_Node[Blockchain Node]
subgraph SM[State-machine]
direction TB
SM1[Cosmos SDK]
end
subgraph CometBFT[CometBFT]
direction TB
Consensus
Networking
end
end
SM <--> CometBFT
Blockchain_Node -->|Includes| SM
Blockchain_Node -->|Includes| CometBFT
```
[CometBFT](https://docs.cometbft.com/v1.0/explanation/introduction/) is an application-agnostic engine that is responsible for handling the _networking_ and _consensus_ layers of a blockchain. In practice, this means that CometBFT is responsible for propagating and ordering transaction bytes. CometBFT relies on an eponymous Byzantine-Fault-Tolerant (BFT) algorithm to reach consensus on the order of transactions.
The [consensus algorithm adopted by CometBFT](https://docs.cometbft.com/v1.0/explanation/introduction/#consensus-overview) works with a set of special nodes called _Validators_. Validators are responsible for adding blocks of transactions to the blockchain. At any given block, there is a validator set V. A validator in V is chosen by the algorithm to be the proposer of the next block. This block is considered valid if more than two thirds of V signed a `prevote` and a `precommit` on it, and if all the transactions that it contains are valid. The validator set can be changed by rules written in the state-machine.
## ABCI
CometBFT passes transactions to the application through an interface called the [ABCI](https://docs.cometbft.com/v1.0/spec/abci/), which the application must implement.
```mermaid
graph TD
A[Application]
B[CometBFT]
A <-->|ABCI| B
```
Note that **CometBFT only handles transaction bytes**. It has no knowledge of what these bytes mean. All CometBFT does is order these transaction bytes deterministically. CometBFT passes the bytes to the application via the ABCI, and expects a return code to inform it if the messages contained in the transactions were successfully processed or not.
Here are the most important messages of the ABCI:
- `CheckTx`: When a transaction is received by CometBFT, it is passed to the application to check if a few basic requirements are met. `CheckTx` is used to protect the mempool of full-nodes against spam transactions. A special handler called the [`AnteHandler`](../beginner/04-gas-fees.md#antehandler) is used to execute a series of validation steps such as checking for sufficient fees and validating the signatures. If the checks are valid, the transaction is added to the [mempool](https://docs.cometbft.com/v1.0/explanation/core/mempool) and relayed to peer nodes. Note that transactions are not processed (i.e. no modification of the state occurs) with `CheckTx` since they have not been included in a block yet.
- `DeliverTx`: When a [valid block](https://docs.cometbft.com/v1.0/spec/core/data_structures#block) is received by CometBFT, each transaction in the block is passed to the application via `DeliverTx` in order to be processed. It is during this stage that the state transitions occur. The `AnteHandler` executes again, along with the actual [`Msg` service](../../build/building-modules/03-msg-services.md) RPC for each message in the transaction.
- `BeginBlock`/`EndBlock`: These messages are executed at the beginning and the end of each block, whether the block contains transactions or not. It is useful to trigger automatic execution of logic. Proceed with caution though, as computationally expensive loops could slow down your blockchain, or even freeze it if the loop is infinite.
Find a more detailed view of the ABCI methods from the [CometBFT docs](https://docs.cometbft.com/v1.0/spec/abci/).
Any application built on CometBFT needs to implement the ABCI interface in order to communicate with the underlying local CometBFT engine. Fortunately, you do not have to implement the ABCI interface. The Cosmos SDK provides a boilerplate implementation of it in the form of [baseapp](./03-sdk-design.md#baseapp).
## Data Availability Layer
The Data Availability (DA) Layer is a critical component within the umbrella of the consensus layer that ensures all necessary data for transactions is available to all network participants. This layer is essential for preventing data withholding attacks, where some nodes might attempt to disrupt the network by not sharing critical transaction data.
If we use the example of Rollkit, a user initiates a transaction, which is then propagated through the rollup network by a light node. The transaction is validated by full nodes and aggregated into a block by the sequencer. This block is posted to a data availability layer like Celestia, ensuring the data is accessible and correctly ordered. The rollup light node verifies data availability from the DA layer. Full nodes then validate the block and generate necessary proofs, such as fraud proofs for optimistic rollups or zk-SNARKs/zk-STARKs for zk-rollups. These proofs are shared across the network and verified by other nodes, ensuring the rollup's integrity. Once all validations are complete, the rollup's state is updated, finalising the transaction.
## Interoperability Layer
The Interoperability Layer enables communication and interaction between different blockchains. This layer facilitates cross-chain transactions and data sharing, allowing various blockchain networks to interoperate seamlessly. Interoperability is key for building a connected ecosystem of blockchains, enhancing their functionality and reach.
In this case we have separated the layers even further to really illustrate the components that make-up the blockchain architecture and it is important to note that the Cosmos SDK is designed to be interoperable with other blockchains. This is achieved through the use of the [Inter-Blockchain Communication (IBC) protocol](https://www.ibcprotocol.dev/), which allows different blockchains to communicate and transfer assets between each other.
---
sidebar_position: 1
---
# Application-Specific Blockchains
:::note Synopsis
This document explains what application-specific blockchains are, and why developers would want to build one as opposed to writing Smart Contracts.
:::
## What are application-specific blockchains
Application-specific blockchains are blockchains customized to operate a single application. Instead of building a decentralized application on top of an underlying blockchain like Ethereum, developers build their own blockchain from the ground up. This means building a full-node client, a light-client, and all the necessary interfaces (CLI, REST, ...) to interact with the nodes.
```mermaid
flowchart TD
subgraph Blockchain_Node[Blockchain Node]
subgraph SM[State-machine]
direction TB
SM1[Cosmos SDK]
end
subgraph Consensus[Consensus]
direction TB
end
subgraph Networking[Networking]
direction TB
end
end
SM <--> Consensus
Consensus <--> Networking
Blockchain_Node -->|Includes| SM
Blockchain_Node -->|Includes| Consensus
Blockchain_Node -->|Includes| Networking
```
## What are the shortcomings of Smart Contracts
Virtual-machine blockchains like Ethereum addressed the demand for more programmability back in 2014. At the time, the options available for building decentralized applications were quite limited. Most developers would build on top of the complex and limited Bitcoin scripting language, or fork the Bitcoin codebase which was hard to work with and customize.
Virtual-machine blockchains came in with a new value proposition. Their state-machine incorporates a virtual-machine that is able to interpret turing-complete programs called Smart Contracts. These Smart Contracts are very good for use cases like one-time events (e.g. ICOs), but they can fall short for building complex decentralized platforms. Here is why:
- Smart Contracts are generally developed with specific programming languages that can be interpreted by the underlying virtual-machine. These programming languages are often immature and inherently limited by the constraints of the virtual-machine itself. For example, the Ethereum Virtual Machine does not allow developers to implement automatic execution of code. Developers are also limited to the account-based system of the EVM, and they can only choose from a limited set of functions for their cryptographic operations. These are examples, but they hint at the lack of **flexibility** that a smart contract environment often entails.
- Smart Contracts are all run by the same virtual machine. This means that they compete for resources, which can severely restrain **performance**. And even if the state-machine were to be split in multiple subsets (e.g. via sharding), Smart Contracts would still need to be interpreted by a virtual machine, which would limit performance compared to a native application implemented at state-machine level (our benchmarks show an improvement on the order of 10x in performance when the virtual-machine is removed).
- Another issue with the fact that Smart Contracts share the same underlying environment is the resulting limitation in **sovereignty**. A decentralized application is an ecosystem that involves multiple players. If the application is built on a general-purpose virtual-machine blockchain, stakeholders have very limited sovereignty over their application, and are ultimately superseded by the governance of the underlying blockchain. If there is a bug in the application, very little can be done about it.
Application-Specific Blockchains are designed to address these shortcomings.
## Application-Specific Blockchains Benefits
### Flexibility
Application-specific blockchains give maximum flexibility to developers:
- In Cosmos blockchains, the state-machine is typically connected to the underlying consensus engine via an interface called the [ABCI](https://docs.cometbft.com/v1.0/spec/abci/) (Application Blockchain Interface). This interface can be wrapped in any programming language, meaning developers can build their state-machine in the programming language of their choice.
- Developers can choose among multiple frameworks to build their state-machine. The most widely used today is the Cosmos SDK, but others exist (e.g. [Lotion](https://github.com/nomic-io/lotion), [Weave](https://github.com/iov-one/weave), ...). Typically the choice will be made based on the programming language they want to use (Cosmos SDK and Weave are in Golang, Lotion is in Javascript, ...).
- The ABCI also allows developers to swap the consensus engine of their application-specific blockchain. Today, only CometBFT is production-ready, but in the future other consensus engines are expected to emerge.
- Even when they settle for a framework and consensus engine, developers still have the freedom to tweak them if they don't perfectly match their requirements in their pristine forms.
- Developers are free to explore the full spectrum of tradeoffs (e.g. number of validators vs transaction throughput, safety vs availability in asynchrony, ...) and design choices (DB or IAVL tree for storage, UTXO or account model, ...).
- Developers can implement automatic execution of code. In the Cosmos SDK, logic can be automatically triggered at the beginning and the end of each block. They are also free to choose the cryptographic library used in their application, as opposed to being constrained by what is made available by the underlying environment in the case of virtual-machine blockchains.
The list above contains a few examples that show how much flexibility application-specific blockchains give to developers. The goal of Cosmos and the Cosmos SDK is to make developer tooling as generic and composable as possible, so that each part of the stack can be forked, tweaked and improved without losing compatibility. As the community grows, more alternatives for each of the core building blocks will emerge, giving more options to developers.
### Performance
Decentralized applications built with Smart Contracts are inherently capped in performance by the underlying environment. For a decentralized application to optimise performance, it needs to be built as an application-specific blockchain. Next are some of the benefits an application-specific blockchain brings in terms of performance:
- Developers of application-specific blockchains can choose to operate with a novel consensus engine such as CometBFT.
- An application-specific blockchain only operates a single application, so that the application does not compete with others for computation and storage. This is the opposite of most non-sharded virtual-machine blockchains today, where smart contracts all compete for computation and storage.
- Even if a virtual-machine blockchain offered application-based sharding coupled with an efficient consensus algorithm, performance would still be limited by the virtual-machine itself. The real throughput bottleneck is the state-machine, and requiring transactions to be interpreted by a virtual-machine significantly increases the computational complexity of processing them.
### Security
Security is hard to quantify, and greatly varies from platform to platform. That said here are some important benefits an application-specific blockchain can bring in terms of security:
- Developers can choose proven programming languages like Go when building their application-specific blockchains, as opposed to smart contract programming languages that are often more immature.
- Developers are not constrained by the cryptographic functions made available by the underlying virtual-machines. They can use their own custom cryptography, and rely on well-audited crypto libraries.
- Developers do not have to worry about potential bugs or exploitable mechanisms in the underlying virtual-machine, making it easier to reason about the security of the application.
### Sovereignty
One of the major benefits of application-specific blockchains is sovereignty. A decentralized application is an ecosystem that involves many actors: users, developers, third-party services, and more. When developers build on a virtual-machine blockchain where many decentralized applications coexist, the community of the application is different from the community of the underlying blockchain, and the latter supersedes the former in the governance process. If there is a bug or if a new feature is needed, stakeholders of the application have very little leeway to upgrade the code. If the community of the underlying blockchain refuses to act, nothing can happen.
The fundamental issue here is that the governance of the application and the governance of the network are not aligned. This issue is solved by application-specific blockchains. Because application-specific blockchains specialize to operate a single application, stakeholders of the application have full control over the entire chain. This ensures that the community will not be stuck if a bug is discovered, and that it has the freedom to choose how it is going to evolve.

.github/aider/guides/sonr-did.md vendored Normal file
@@ -0,0 +1,141 @@
# `x/did`
The Decentralized Identity module is responsible for managing native Sonr Accounts, their derived wallets, and associated user identification information.
## State
The DID module maintains several key state structures:
### Controller State
The Controller state represents a Sonr DWN Vault. It includes:
- Unique identifier (number)
- DID
- Sonr address
- Ethereum address
- Bitcoin address
- Public key
- Keyshares pointer
- Claimed block
- Creation block
### Assertion State
The Assertion state includes:
- DID
- Controller
- Subject
- Public key
- Assertion type
- Accumulator (metadata)
- Creation block
### Authentication State
The Authentication state includes:
- DID
- Controller
- Subject
- Public key
- Credential ID
- Metadata
- Creation block
### Verification State
The Verification state includes:
- DID
- Controller
- DID method
- Issuer
- Subject
- Public key
- Verification type
- Metadata
- Creation block
## State Transitions
State transitions are triggered by the following messages:
- LinkAssertion
- LinkAuthentication
- UnlinkAssertion
- UnlinkAuthentication
- ExecuteTx
- UpdateParams
## Messages
The DID module defines the following messages:
1. MsgLinkAuthentication
2. MsgLinkAssertion
3. MsgExecuteTx
4. MsgUnlinkAssertion
5. MsgUnlinkAuthentication
6. MsgUpdateParams
Each message triggers specific state machine behaviors related to managing DIDs, authentications, assertions, and module parameters.
## Query
The DID module provides the following query endpoints:
1. Params: Query all parameters of the module
2. Resolve: Query the DID document by its ID
3. Sign: Sign a message with the DID document
4. Verify: Verify a message with the DID document
## Params
The module parameters include:
- Allowed public keys (map of KeyInfo)
- Conveyance preference
- Attestation formats
## Client
The module provides gRPC and REST endpoints for all defined messages and queries.
## Future Improvements
Potential future improvements could include:
1. Enhanced privacy features for DID operations
2. Integration with more blockchain networks
3. Support for additional key types and cryptographic algorithms
4. Improved revocation mechanisms for credentials and assertions
## Tests
Acceptance tests should cover all major functionality, including:
- Creating and managing DIDs
- Linking and unlinking assertions and authentications
- Executing transactions with DIDs
- Querying and resolving DIDs
- Parameter updates
## Appendix
### Account
An Account represents a user's identity within the Sonr ecosystem. It includes information such as the user's public key, associated wallets, and other identification details.
### Decentralized Identifier (DID)
A Decentralized Identifier (DID) is a unique identifier that is created, owned, and controlled by the user. It is used to establish a secure and verifiable digital identity.
### Verifiable Credential (VC)
A Verifiable Credential (VC) is a digital statement that can be cryptographically verified. It contains claims about a subject (e.g., a user) and is issued by a trusted authority.
### Key Types
The module supports various key types, including:
- Role
- Algorithm (e.g., ES256, EdDSA, ES256K)
- Encoding (e.g., hex, base64, multibase)
- Curve (e.g., P256, P384, P521, X25519, X448, Ed25519, Ed448, secp256k1)
### JSON Web Key (JWK)
The module supports JSON Web Keys (JWK) for representing cryptographic keys, including properties such as key type (kty), curve (crv), and coordinates (x, y) for EC and OKP keys, as well as modulus (n) and exponent (e) for RSA keys.
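For illustration, a minimal Go representation of such a JWK; the field names follow RFC 7517/7518, and this sketch is not the module's actual type:
```go
type JWK struct {
	Kty string `json:"kty"`           // key type: "EC", "OKP", or "RSA"
	Crv string `json:"crv,omitempty"` // curve, e.g. "P-256" or "Ed25519"
	X   string `json:"x,omitempty"`   // x coordinate (EC) or public key bytes (OKP)
	Y   string `json:"y,omitempty"`   // y coordinate (EC only)
	N   string `json:"n,omitempty"`   // modulus (RSA)
	E   string `json:"e,omitempty"`   // exponent (RSA)
}
```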

.github/aider/guides/sonr-dwn.md vendored Normal file
@@ -0,0 +1,145 @@
# `x/dwn`
The DWN module is responsible for the management of IPFS deployed Decentralized Web Nodes (DWNs) and their associated data.
## Concepts
The DWN module introduces several key concepts:
1. Decentralized Web Node (DWN): A distributed network for storing and sharing data.
2. Schema: A structure defining the format of various data types in the dwn.
3. IPFS Integration: The module can interact with IPFS for decentralized data storage.
## State
The DWN module maintains the following state:
### DWN State
The DWN state is stored using the following structure:
```protobuf
message DWN {
uint64 id = 1;
string alias = 2;
string cid = 3;
string resolver = 4;
}
```
This state is indexed by ID, alias, and CID for efficient querying.
### Params State
The module parameters are stored in the following structure:
```protobuf
message Params {
bool ipfs_active = 1;
bool local_registration_enabled = 2;
Schema schema = 4;
}
```
### Schema State
The Schema state defines the structure for various data types:
```protobuf
message Schema {
int32 version = 1;
string account = 2;
string asset = 3;
string chain = 4;
string credential = 5;
string did = 6;
string jwk = 7;
string grant = 8;
string keyshare = 9;
string profile = 10;
}
```
## State Transitions
State transitions in the DWN module are primarily triggered by:
1. Updating module parameters
2. Allocating new dwns
3. Syncing DID documents
## Messages
The DWN module defines the following message:
1. `MsgUpdateParams`: Used to update the module parameters.
```protobuf
message MsgUpdateParams {
string authority = 1;
Params params = 2;
}
```
## Begin Block
No specific begin-block operations are defined for this module.
## End Block
No specific end-block operations are defined for this module.
## Hooks
The DWN module does not define any hooks.
## Events
The DWN module does not explicitly define any events. However, standard Cosmos SDK events may be emitted during state transitions.
## Client
The DWN module provides the following gRPC query endpoints:
1. `Params`: Queries all parameters of the module.
2. `Schema`: Queries the DID document schema.
3. `Allocate`: Initializes a Target DWN available for claims.
4. `Sync`: Queries the DID document by its ID and returns required information.
## Params
The module parameters include:
- `ipfs_active` (bool): Indicates if IPFS integration is active.
- `local_registration_enabled` (bool): Indicates if local registration is enabled.
- `schema` (Schema): Defines the structure for various data types in the dwn.
## Future Improvements
Potential future improvements could include:
1. Enhanced IPFS integration features.
2. Additional authentication mechanisms beyond WebAuthn.
3. Improved DID document management and querying capabilities.
## Tests
Acceptance tests should cover:
1. Parameter updates
2. DWN state management
3. Schema queries
4. DWN allocation process
5. DID document syncing
## Appendix
| Concept | Description |
| ------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| Decentralized Web Node (DWN) | A decentralized, distributed, and secure network of nodes that store and share data. It is a decentralized alternative to traditional web hosting services. |
| Decentralized Identifier (DID) | A unique identifier that is created, owned, and controlled by the user. It is used to establish a secure and verifiable digital identity. |
| HTMX (Hypertext Markup Language eXtensions) | A set of extensions to HTML that allow for the creation of interactive web pages. It is used to enhance the user experience and provide additional functionality to web applications. |
| IPFS (InterPlanetary File System) | A decentralized, peer-to-peer network for storing and sharing data. It is a distributed file system that allows for the creation and sharing of content across a network of nodes. |
| WebAuthn (Web Authentication) | A set of APIs that allow websites to request user authentication using biometric or non-biometric factors. |
| WebAssembly (Web Assembly) | A binary instruction format for a stack-based virtual machine. |
| Verifiable Credential (VC) | A digital statement that can be cryptographically verified. |

.github/aider/guides/sonr-service.md vendored Normal file
@@ -0,0 +1,91 @@
# `x/svc`
The svc module is responsible for managing the registration and authorization of services within the Sonr ecosystem. It provides a secure and verifiable mechanism for registering and authorizing services using Decentralized Identifiers (DIDs).
## Concepts
- **Service**: A decentralized svc on the Sonr Blockchain with properties such as ID, authority, origin, name, description, category, tags, and expiry height.
- **Profile**: Represents a DID alias with properties like ID, subject, origin, and controller.
- **Metadata**: Contains information about a svc, including name, description, category, icon, and tags.
### Dependencies
- [x/did](https://github.com/onsonr/sonr/tree/master/x/did)
- [x/group](https://github.com/onsonr/sonr/tree/master/x/group)
- [x/nft](https://github.com/onsonr/sonr/tree/master/x/nft)
## State
The module uses the following state structures:
### Metadata
Stores information about services:
- Primary key: `id` (auto-increment)
- Unique index: `origin`
- Fields: id, origin, name, description, category, icon (URI), tags
### Profile
Stores DID alias information:
- Primary key: `id`
- Unique index: `subject,origin`
- Fields: id, subject, origin, controller
## Messages
### MsgUpdateParams
Updates the module parameters. Can only be executed by the governance account.
### MsgRegisterService
Registers a new svc on the blockchain. Requires a valid TXT record in DNS for the origin.
## Params
The module has the following parameters:
- `categories`: List of allowed svc categories
- `types`: List of allowed svc types
## Query
The module provides the following query:
### Params
Retrieves all parameters of the module.
## Client
### gRPC
The module provides a gRPC Query svc with the following RPC:
- `Params`: Get all parameters of the module
### CLI
(TODO: Add CLI commands for interacting with the module)
## Events
(TODO: List and describe event tags used by the module)
## Future Improvements
- Implement svc discovery mechanisms
- Add support for svc reputation and rating systems
- Enhance svc metadata with more detailed information
- Implement svc update and deactivation functionality
## Tests
(TODO: Add acceptance tests for the module)
## Appendix
This module is part of the Sonr blockchain project and interacts with other modules such as DID and NFT modules to provide a comprehensive decentralized svc ecosystem.

.github/aider/guides/sonr-token.md vendored Normal file

.github/aider/guides/ucan-spec.md vendored Normal file
@@ -0,0 +1,897 @@
# User Controlled Authorization Network (UCAN) Specification
## Version 1.0.0-rc.1
## Editors
- [Brooklyn Zelenka], [Witchcraft Software]
## Authors
- [Irakli Gozalishvili], [Protocol Labs]
- [Daniel Holmgren], [Bluesky]
- [Philipp Krüger], [number zero]
- [Brooklyn Zelenka], [Witchcraft Software]
## Sub-Specifications
- [UCAN Delegation][delegation]
- [UCAN Invocation][invocation]
- [UCAN Promise][promise]
- [UCAN Revocation][revocation]
## Language
The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "NOT RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as described in [BCP 14] when, and only when, they appear in all capitals, as shown here.
# Abstract
User-Controlled Authorization Network (UCAN) is a [trustless], secure, [local-first], user-originated, distributed authorization scheme. This document provides a high level overview of the components of the system, concepts, and motivation. Exact formats are given in [sub-specifications].
# Introduction
User-Controlled Authorization Network (UCAN) is a [trustless], secure, [local-first], user-originated, distributed authorization scheme. It provides public-key verifiable, delegable, expressive, openly extensible [capabilities]. UCANs achieve public verifiability with late-bound certificate chains and principals represented by [decentralized identifiers (DIDs)][DID].
UCAN improves the familiarity and adoptability of schemes like [SPKI/SDSI][SPKI] for web and native application contexts. UCAN allows for the creation, delegation, and invocation of authority by any agent with a DID, including traditional systems and peer-to-peer architectures beyond traditional cloud computing.
## Motivation
> If we practice our principles, we could have both security and functionality. Treating security as a separate concern has not succeeded in bridging the gap between principle and practice, because it operates without knowledge of what constitutes least authority.
>
> — [Miller][Mark Miller] et al, [The Structure of Authority]
Since at least [Multics], access control lists ([ACL]s) have been the most popular form of digital authorization, where a list of what each user is allowed to do is maintained on the resource. ACLs (and later [RBAC]) have been a successful model suited to architectures where persistent access to a single list is viable. ACLs require that rules are sufficiently well specified, such as in a centralized database with rules covering all possible permutations of scenario. This both imposes a very high maintenance burden on programmers as a system grows in complexity, and is a key vector for [confused deputies][confused deputy problem].
With increasing interconnectivity between machines becoming commonplace, authorization needs to scale to meet the load demands of distributed systems while providing partition tolerance. However, it is not always practical to maintain a single central authorization source. Even when copies of the authorization list are distributed to the relevant servers, latency and partitions introduce troublesome challenges with conflicting updates, to say nothing of storage requirements.
A large portion of personal information now also moves through connected systems. As a result, data privacy is a prominent theme when considering the design of modern applications, to the point of being legislated in parts of the world.
Ahead-of-time coordination is often a barrier to development in many projects. Flexibility to define specialized authorization semantics for resources and the ability to integrate with external systems trustlessly are essential as the number of autonomous, specialized, and coordinated applications increases.
Many high-value applications run in hostile environments. In recognition of this, many vendors now include public key functionality, such as [non-extractable keys in browsers][browser api crypto key], [certificate systems for external keys][fido], [platform keys][passkey], and [secure hardware enclaves] in widespread consumer devices.
Two related models that work exceptionally well in the above context are Simple Public Key Infrastructure ([SPKI][spki rfc]) and object capabilities ([OCAP]). Since offline operation and self-verifiability are two requirements, UCAN adopts a [certificate capability model] related to [SPKI].
## Intuition for Auth System Differences
The following analogies illustrate several significant trade-offs between these systems but are only accurate enough to build intuition. A good resource for a more thorough presentation of these trade-offs is [Capability Myths Demolished]. In this framework, UCAN approximates SPKI with some dynamic features.
### Access Control Lists
By analogy, ACLs are like a bouncer at an exclusive event. This bouncer has a list of attendees allowed in, and which of those are VIPs that get extra access. People trying to get in show their government-issued ID and are accepted or rejected. In addition, they may get a lanyard to identify that they have previously been allowed in. If someone is disruptive, they can simply be crossed off the list and denied further entry.
If there are many such events at many venues, the organizers need to coordinate ahead of time, denials need to be synchronized, and attendees need to show their ID cards to many bouncers. The likelihood of the bouncer letting in the wrong person due to synchronization lag or confusion by someone sharing a name is nonzero.
### Certificate Capabilities
UCANs work more like [movie tickets][caps as keys] or a festival pass. No one needs to check your ID; who you are is irrelevant. For example, if you have a ticket issued by the theater to see Citizen Kane, you are admitted to Theater 3. If you cannot attend an event, you can hand this ticket to a friend who wants to see the film instead, and there is no coordination required with the theater ahead of time. However, if the theater needs to cancel tickets for some reason, they need a way of uniquely identifying them and sharing this information between them.
### Object Capabilities
Object capability ("ocap") systems use a combination of references, encapsulated state, and proxy forwarding. As the name implies, this is fairly close to object-oriented or actor-based systems. Object capabilities are [robust][Robust Composition], flexible, and expressive.
To achieve these properties, object capabilities have two requirements: [fail-safe], and locality preservation. The emphasis on consistency rules out partition tolerance[^pcec].
## Security Considerations
Each UCAN includes an assertion of what it is allowed to do. "Proofs" are positive evidence (elsewhere called "witnesses") of the possession of rights. They are cryptographically verifiable chains showing that the UCAN issuer either claims to directly own a resource, or that it was delegated to them by some claimed owner. In the most common case, the root owner's ID is the only globally unique identity for the resource.
Root capability issuers function as verifiable, distributed roots of trust. The delegation chain is by definition a provenance log. Private keys themselves SHOULD NOT move from one context to another. Keeping keys unique to each physical device and unique per use case is RECOMMENDED to reduce opportunity for keys to leak, and limit blast radius in the case of compromises. "Sharing authority without sharing keys" is provided by capabilities, so there is no reason to share keys directly.
Note that a structurally and cryptographically valid UCAN chain can be semantically invalid. The executor MUST verify the ownership of any external resources at execution time. While not possible for all use cases (e.g. replicated state machines and eventually consistent data), having the Executor be the resource itself is RECOMMENDED.
While certificate chains go a long way toward improving security, they do not provide [confinement] on their own. The principle of least authority SHOULD be used when delegating a UCAN: minimizing the amount of time that a UCAN is valid for and reducing authority to the bare minimum required for the delegate to complete their task. This delegate should be trusted as little as is practical since they can further sub-delegate their authority to others without alerting their delegator. UCANs do not offer confinement (as that would require all processes to be online), so it is impossible to guarantee knowledge of all of the sub-delegations that exist. The ability to revoke some or all downstream UCANs exists as a last resort.
## Inversion of Control
[Inversion of control] is achieved due to two properties: self-certifying delegation and reference passing. There is no Authorization Server (AS) that sits between requestors and resources. In traditional terms, the owner of a UCAN resource is the resource server (RS) directly.
This inverts the usual relationship between resources and users: the resource grants some (or all) authority over itself to agents, as opposed to an Authorization Server managing the relationship between them. This has several major advantages:
- Fully distributed and scalable
- Self-contained request without intermediary
- Partition tolerance, [support for replicated data and machines][overcoming SSI]
- Flexible granularity
- Compositionality: no distinction between resources residing together or apart
```
┌─────────────┐ ┌─────────────┐ ┌─────────────┐
│ │ │ │ │ │
│ │ │ ┌─────────┐ │ │ │
│ │ │ │ Bob's │ │ │ │
│ │ │ │ Photo │ │ │ │
│ │ │ │ Gallery │ │ │ │
│ │ │ └─────────┘ │ │ │
│ │ │ │ │ │
│ Alice's │ │ Bob's │ │ Carol's │
│ Stuff │ │ Stuff │ │ Stuff │
│ │ │ │ │ │
│ ┌───────┼───┼─────────────┼───┼──┐ │
│ │ │ │ │ │ │ │
│ │ │ │ ┌───┼───┼──┼────────┐ │
│ │ │ │ Alice's │ │ │ │ │ │
│ │ │ │ Music │ │ │ │Carol's │ │
│ │ │ │ Player │ │ │ │ Game │ │
│ │ │ │ │ │ │ │ │ │
│ │ │ │ └───┼───┼──┼────────┘ │
│ │ │ │ │ │ │ │
│ └───────┼───┼─────────────┼───┼──┘ │
│ │ │ │ │ │
└─────────────┘ └─────────────┘ └─────────────┘
```
This additionally allows UCAN to model auth for [eventually consistent and replicated state][overcoming SSI].
# Roles
There are several roles that an agent MAY assume:
| Name | Description |
| --------- | ------------------------------------------------------------------------------------------------ |
| Agent | The general class of entities and principals that interact with a UCAN |
| Audience | The Principal delegated to in the current UCAN. Listed in the `aud` field |
| Executor | The Agent that actually performs the action described in an invocation |
| Invoker | A Principal that requests an Executor perform some action that uses the Invoker's authority |
| Issuer | The Principal of the current UCAN. Listed in the `iss` field |
| Owner | A Subject that controls some external resource |
| Principal | An agent identified by DID (listed in a UCAN's `iss` or `aud` field) |
| Revoker | The Issuer listed in a proof chain that revokes a UCAN |
| Subject   | The Principal whose authority is delegated or invoked                                              |
| Validator | Any Agent that interprets a UCAN to determine that it is valid, and which capabilities it grants |
```mermaid
flowchart TD
subgraph Agent
subgraph Principal
direction TB
subgraph Issuer
direction TB
subgraph Subject
direction TB
Executor
Owner
end
Revoker
end
subgraph Audience
Invoker
end
end
Validator
end
```
## Subject
> At the very least every object should have a URL
>
> — [Alan Kay], [The computer revolution hasn't happened yet]
> Every Erlang process in the universe should be addressable and introspective
>
> — [Joe Armstrong], [Code Mesh 2016]
A [Subject] represents the Agent that a capability is for. A Subject MUST be referenced by [DID]. This behaves much like a [GUID], with the addition of public key verifiability. This unforgeability prevents malicious namespace collisions which can lead to [confused deputies][confused deputy problem].
### Resource
A resource is some data or process that can be uniquely identified by a [URI]. It can be anything from a row in a database, a user account, storage quota, an email address, etc. A Resource MAY be as coarse- or fine-grained as desired. Finer-grained is RECOMMENDED where possible, as it is easier to model the principle of least authority ([PoLA]).
A resource describes the noun of a capability. The resource pointer MUST be provided in [URI] format. Arbitrary and custom URIs MAY be used, provided that the intended recipient can decode the URI. The URI is merely a unique identifier to describe the pointer to — and within — a resource.
Having a unique agent represent a resource (and act as its manager) is RECOMMENDED. However, to help traditional ACL-based systems transition to certificate capabilities, an agent MAY manage multiple resources, and [act as the registrant in the ACL system][wrapping existing systems].
Unless explicitly stated, the Resource of a UCAN MUST be the Subject.
## Issuer & Audience
The Issuer (`iss`) and Audience (`aud`) can be conceptualized as the sender and receiver (respectively) of a postal letter. Every UCAN MUST be signed with the private key associated with the DID in the `iss` field.
For example:
```js
"aud": "did:key:z6MkiTBz1ymuepAQ4HEHYSF1H8quG5GLVVQR3djdX3mDooWp",
"iss": "did:key:zDnaerDaTF5BXEavCrfRZEk316dpbLsfPDZ3WJ5hRTPFU2169",
```
Please see the [Cryptosuite] section for more detail on DIDs.
# Lifecycle
The UCAN lifecycle has four components:
| Spec | Description | Requirement Level |
| ------------ | ------------------------------------------------------------------------ | ----------------- |
| [Delegation] | Pass, attenuate, and secure authority in a partition-tolerant way | REQUIRED |
| [Invocation] | Exercise authority that has been delegated through one or more delegates | REQUIRED |
| [Promise] | Await the result of an Invocation inside another Invocation | RECOMMENDED |
| [Revocation] | Undo a delegation, breaking a delegation chain for malicious users | RECOMMENDED |
```mermaid
flowchart TD
prm(Promise)
inv(Invocation)
del(Delegation)
rev(Revocation)
prm -->|awaits| inv
del -->|proves| inv
rev -.->|kind of| inv
rev -->|invalidates| del
click del href "https://github.com/ucan-wg/delegation" "UCAN Delegation Spec"
click inv href "https://github.com/ucan-wg/invocation" "UCAN Invocation Spec"
click rev href "https://github.com/ucan-wg/revocation" "UCAN Revocation Spec"
```
## Time
It is often useful to talk about a UCAN in the context of some action. For example, a UCAN delegation may be valid when it was created, but expired when invoked.
```mermaid
sequenceDiagram
Alice -->> Bob: Delegate
Bob ->> Bob: Validate
Bob -->> Carol: Delegate
Carol ->> Carol: Validate
Carol ->> Alice: Invoke
Alice ->> Alice: Validate
Alice ->> Alice: Execute
```
### Validity Interval
The period of time that a capability is valid from and until. This is the range from the latest "not before" to the earliest expiry in the UCAN delegation chain.
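Below is a minimal, non-normative Go sketch of intersecting the bounds along a chain; the types and field names are invented for illustration and are not taken from any UCAN library:
```go
// Sketch: the latest "not before" and the earliest expiry bound the interval.
package ucan

type TimeBounds struct {
	Nbf *int64 // nil: valid from the Unix epoch
	Exp *int64 // nil: never expires
}

// ValidityInterval intersects the bounds of every token in the chain.
func ValidityInterval(chain []TimeBounds) (nbf, exp *int64) {
	for _, t := range chain {
		if t.Nbf != nil && (nbf == nil || *t.Nbf > *nbf) {
			nbf = t.Nbf // latest "not before" wins
		}
		if t.Exp != nil && (exp == nil || *t.Exp < *exp) {
			exp = t.Exp // earliest expiry wins
		}
	}
	return nbf, exp
}
```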
### Delegation-Time
The moment at which a delegation is asserted. This MAY be captured by an `iat` field, but is generally superfluous to capture in the token.
### Invocation-Time
The moment a UCAN Invocation is created. It MUST be within the Validity Interval.
### Validation-Time
Validation MAY occur at multiple points during a UCAN's lifecycle. The main two are:
- On receipt of a delegation
- When executing an invocation
### Execution-Time
To avoid the overloaded word "runtime", UCAN adopts the term "execution-time" to express the moment that the executor attempts to use the authority captured in an invocation and associated delegation chain. Validation MUST occur at this time.
## Time Bounds
`nbf` and `exp` stand for "not before" and "expires at," respectively. These MUST be expressed as seconds since the Unix epoch in UTC, without time zone or other offset. Taken together, they represent the time bounds for a token. These timestamps MUST be represented as the number of integer seconds since the Unix epoch. Due to limitations[^js-num-size] in numerics for certain common languages, timestamps outside of the range from $-2^{53}+1$ to $2^{53}-1$ MUST be rejected as invalid.
The `nbf` field is OPTIONAL. When omitted, the token MUST be treated as valid beginning from the Unix epoch. Setting the `nbf` field to a time in the future MUST delay invoking a UCAN. For example, pre-provisioning access to conference materials ahead of time but not allowing access until the day it starts is achievable with judicious use of `nbf`.
The `exp` field is RECOMMENDED. Following the [principle of least authority][PoLA], it is RECOMMENDED to give a timestamp expiry for UCANs. If the token explicitly never expires, the `exp` field MUST be set to `null`. If the time is in the past at validation time, the token MUST be treated as expired and invalid.
Keeping the window of validity as short as possible is RECOMMENDED. Limiting the time range can mitigate the risk of a malicious user abusing a UCAN. However, this is situationally dependent. It may be desirable to limit the frequency of forced reauthorizations for trusted devices. Due to clock drift, time bounds SHOULD NOT be considered exact. A buffer of ±60 seconds is RECOMMENDED.
Several named points of time in the UCAN lifecycle can be found in the [high level spec][UCAN].
Below are a couple examples:
```js
{
// ...
"nbf": 1529496683,
"exp": 1575606941
}
```
```js
{
// ...
"exp": 1575606941
}
```
```js
{
// ...
"nbf": 1529496683,
"exp": null
}
```
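A hedged Go sketch of checking these bounds at validation time with the RECOMMENDED ±60 second drift buffer follows; the helper name and signature are illustrative, not from any implementation:
```go
// Sketch: validate nbf/exp at a given moment, allowing for clock drift.
package ucan

import "time"

const driftBuffer = 60 * time.Second

// WithinBounds reports whether now falls inside [nbf, exp].
// A nil nbf means valid from the epoch; a nil exp means no expiry.
func WithinBounds(nbf, exp *int64, now time.Time) bool {
	if nbf != nil && now.Add(driftBuffer).Before(time.Unix(*nbf, 0)) {
		return false // not yet valid, even after allowing drift
	}
	if exp != nil && now.Add(-driftBuffer).After(time.Unix(*exp, 0)) {
		return false // expired, even after allowing drift
	}
	return true
}
```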
## Lifecycle Example
Here is a concrete example of all stages of the UCAN lifecycle for database write access.
```mermaid
sequenceDiagram
participant Database
actor DBAgent
actor Alice
actor Bob
Note over Database, DBAgent: Set Up Agent-Owned Resource
DBAgent ->> Database: createDB()
autonumber 1
Note over DBAgent, Bob: Delegation
DBAgent -->> Alice: delegate(DBAgent, write)
Alice -->> Bob: delegate(DBAgent, write)
Note over Database, Bob: Invocation
Bob ->> DBAgent: invoke(DBAgent, [write, [key, value]], proof: [➊,➋])
DBAgent ->> Database: write(key, value)
DBAgent ->> Bob: ACK
Note over DBAgent, Bob: Revocation
Alice ->> DBAgent: revoke(➋, proof: [➊,➋])
Bob ->> DBAgent: invoke(DBAgent, [write, [key, newValue]], proof: [➊,➋])
DBAgent -X Bob: NAK(➏) [rejected]
```
## Capability
A capability is the association of an ability to a subject: `subject x command x policy`.
The Subject and Command fields are REQUIRED. Any non-normative extensions are OPTIONAL.
For example, a capability may be used to represent the ability to send email from a certain address to others at `@example.com`.
| Field | Example |
| ------- | -------------------------------------------------------------------------------------------- |
| Subject | `did:key:z6MkhaXgBZDvotDkL5257faiztiGiC2QtKLGpbnnEGta2doK` |
| Command | `/msg/send` |
| Policy | `["or", ["==", ".from", "mailto:me@example.com"], ["match", ".cc", "mailto:*@example.com"]]` |
For a more complete treatment, please see the [UCAN Delegation][delegation] spec.
## Authority
> Whether to enable cooperation or to limit vulnerability, we care about _authority_ rather than _permissions._ Permissions determine what actions an individual program may perform on objects it can directly access. Authority describes the effects that a program may cause on objects it can access, either directly by permission, or indirectly by permitted interactions with other programs.
>
> —[Mark Miller], [Robust Composition]
The set of capabilities delegated by a UCAN is called its "authority." To frame it another way, it's the set of effects that a principal can cause, and acts as a declarative description of delegated abilities.
Merging capability authorities MUST follow set semantics, where the result includes all capabilities from the input authorities. Since broader capabilities automatically include narrower ones, this process is always additive. Capability authorities can be combined in any order, with the result always being at least as broad as each of the original authorities.
```plaintext
┌───────────────────────┐ ┐
│ │ │
│ │ │
│ │ │
│ │ │
│ Subject B │ │
┌──────────────────┼ ─ ─ x │ │
│ │ Ability Z │ ├── BxZ
│ │ │ │ Capability
│ │ │ │
│ │ │ │
│ Subject A │ │ │
│ x │ │ │
│ Ability Y ─ ─┼──────────────────┘ ┘
│ │
│ │
│ │
│ │
│ │
└───────────────────────┘
└─────────────────────┬────────────────────┘
AxY U BxZ
Capability
```
The capability authority is the total rights of the authorization space down to the relevant volume of authorizations. Individual capabilities MAY overlap; the authority is the union. Every unique delegated capability MUST have equal or narrower capabilities from their delegator. Inside this content space, you can draw a boundary around some resource(s) (their type, identifiers, and paths or children) and their capabilities.
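As a rough, non-normative illustration of these set semantics, the sketch below merges two authorities additively (the `Capability` type is invented here; checking whether one capability subsumes another is the segment-prefix rule described under Command below):
```go
// Sketch: additive union of two authorities. Identical capabilities are
// deduplicated; the result is always at least as broad as either input.
package ucan

type Capability struct {
	Subject string // DID of the Subject
	Command string // e.g. "/msg/send"
}

// Union merges two authorities with set semantics.
func Union(a, b []Capability) []Capability {
	seen := map[Capability]bool{}
	var out []Capability
	for _, c := range append(append([]Capability{}, a...), b...) {
		if !seen[c] {
			seen[c] = true
			out = append(out, c)
		}
	}
	return out
}
```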
## Command
Commands are concrete messages ("verbs") that MUST be unambiguously interpretable by the Subject of a UCAN. Commands are REQUIRED in invocations. Some examples include `/msg/send`, `/crud/read`, and `/ucan/revoke`.
Much like other message-passing systems, the specific resource MUST define the behavior for a particular message. For instance, `/crud/update` MAY be used to destructively update a database row, or append to an append-only log. Specific messages MAY be created at will; the only restriction is that the Executor understand how to interpret that message in the context of a specific resource.
While arbitrary semantics MAY be described, they MUST apply to the target resource. For instance, it does not make sense to apply `/msg/send` to a typical file system.
### Segment Structure
Commands MUST be lowercase, and begin with a slash (`/`). Segments MUST be separated by a slash. A trailing slash MUST NOT be present. All of the following are syntactically valid Commands:
- `/`
- `/crud`
- `/crud/create`
- `/stack/pop`
- `/crypto/sign`
- `/foo/bar/baz/qux/quux`
- `/ほげ/ふが`
Segment structure is important since shorter Commands prove longer paths. For instance, `/` can be used as a proof of _any_ other Command, and `/crypto` MAY be used to prove `/crypto/sign` but MUST NOT prove `/stack/pop` or `/cryptocurrency`.
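The rule is segment-wise, not character-wise, as the following non-normative Go sketch makes explicit (not taken from any official implementation):
```go
// Sketch: a Command proves another only along whole segments, so
// "/crypto" proves "/crypto/sign" but never "/cryptocurrency".
package ucan

import "strings"

// Proves reports whether the proof Command covers the invoked Command.
func Proves(proof, invoked string) bool {
	if proof == "/" {
		return true // "top" proves any Command
	}
	return proof == invoked || strings.HasPrefix(invoked, proof+"/")
}
```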
### `/` AKA "Top"
_"Top" (`/`) is the most powerful ability, and as such it SHOULD be handled with care and used sparingly._
The "top" (or "any", or "wildcard") ability MUST be denoted `/`. This can be thought of as something akin to a super user permission in RBAC.
The wildcard ability grants access to all other capabilities for the specified resource, across all possible namespaces. The wildcard ability is useful when "linking" agents by delegating all access to another device controlled by the same user, and that should behave as the same agent. It is extremely powerful, and should be used with care. Among other things, it permits the delegate to update a Subject's mutable DID document (change their private keys), revoke UCAN delegations, and use any resources delegated to the Subject by others.
```mermaid
%%{ init: { 'flowchart': { 'curve': 'linear' } } }%%
flowchart BT
/
/msg --> /
subgraph msgGraph [ ]
/msg/send --> /msg
/msg/receive --> /msg
end
/crud --> /
subgraph crudGraph [ ]
/crud/read --> /crud
/crud/mutate --> /crud
subgraph mutationGraph [ ]
/crud/mutate/create --> /crud/mutate
/crud/mutate/update --> /crud/mutate
/crud/mutate/destroy --> /crud/mutate
end
end
... --> /
```
### Reserved Commands
#### `/ucan` Namespace
The `/ucan` Command namespace MUST be reserved. This MUST include any ability string matching the regex `^ucan\/.*`. This is important for keeping a space for community-blessed Commands in the future, such as standard library Commands like [Revocation].
## Attenuation
Attenuation is the process of constraining the capabilities in a delegation chain. Each direct delegation MUST either directly restate or attenuate (diminish) its capabilities.
# Token Resolution
Token resolution is transport specific. The exact format is left to the relevant UCAN transport specification. At minimum, such a specification MUST define at least the following:
1. Request protocol
2. Response protocol
3. Collections format
Note that if an instance cannot dereference a CID at runtime, the UCAN MUST fail validation. This is consistent with the [constructive semantics] of UCAN.
# Nonce
The REQUIRED nonce parameter `nonce` MAY be any value. A randomly generated string is RECOMMENDED to provide a unique UCAN, though it MAY also be a monotonically increasing count of the number of links in the hash chain. This field helps prevent replay attacks and ensures a unique CID per delegation. The `iss`, `aud`, and `exp` fields together will often ensure that UCANs are unique, but adding the nonce ensures uniqueness.
The recommended size of the nonce differs by key type. In many cases, a random 12-byte nonce is sufficient. If uncertain, check the nonce in your DID's crypto suite.
This field SHOULD NOT be used to sign arbitrary data, such as signature challenges. See the [`meta`][Metadata] field for more.
Here is a simple example.
```js
{
// ...
"nonce": {"/": {"bytes": "bGlnaHQgd29yay4"}}
}
```
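Generating the RECOMMENDED random 12-byte nonce is straightforward with Go's standard library; this sketch assumes nothing beyond `crypto/rand`:
```go
// Sketch: produce 12 random bytes suitable for the `nonce` field.
package ucan

import "crypto/rand"

// NewNonce returns a fresh 12-byte random nonce.
func NewNonce() ([]byte, error) {
	nonce := make([]byte, 12)
	if _, err := rand.Read(nonce); err != nil {
		return nil, err
	}
	return nonce, nil
}
```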
# Metadata
The OPTIONAL `meta` field contains a map of arbitrary metadata, facts, and proofs of knowledge. The enclosed data MUST be self-evident and externally verifiable. It MAY include information such as hash preimages, server challenges, a Merkle proof, dictionary data, etc.
The data contained in this map MUST NOT be semantically meaningful to delegation chains.
Below is an example:
```js
{
// ...
"meta": {
"challenges": {
"example.com": "abcdef",
"another.example.net": "12345"
},
"sha3_256": {
"B94D27B9934D3E08A52E52D7DA7DABFAC484EFE37A5380EE9088F7ACE2EFCDE9": "hello world"
}
}
}
```
# Canonicalization
## Cryptosuite
Across all UCAN specifications, the following cryptosuite MUST be supported:
| Role | REQUIRED Algorithms | Notes |
| --------- | --------------------------------- | ------------------------------------ |
| Hash | [SHA-256] | |
| Signature | [Ed25519], [P-256], [`secp256k1`] | Preference of Ed25519 is RECOMMENDED |
| [DID] | [`did:key`] | |
## Encoding
All UCANs MUST be canonically encoded with [DAG-CBOR] for signing. A UCAN MAY be presented or stored in other [IPLD] formats (such as [DAG-JSON]), but converted to DAG-CBOR for signature validation.
## Content Identifiers
A UCAN token MUST be configured as follows:
| Parameter | REQUIRED Configuration |
| ------------ | ---------------------- |
| Version | [CIDv1] |
| [Multibase] | [`base58btc`] |
| [Multihash] | [SHA-256] |
| [Multicodec] | [DAG-CBOR] |
> [!NOTE]
> All CIDs encoded as above start with the characters `zdpu`.
The resolution of these addresses is left to the implementation and end-user, and MAY (non-exclusively) include the following: local store, a distributed hash table (DHT), gossip network, or RESTful service.
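A sketch of deriving such a CID with the `go-cid` and `go-multihash` libraries follows; it assumes the token has already been canonically encoded to DAG-CBOR, which is out of scope here:
```go
// Sketch: CIDv1 + SHA-256 + DAG-CBOR codec, rendered in base58btc,
// which yields the "zdpu..." identifiers noted above.
package ucan

import (
	"github.com/ipfs/go-cid"
	"github.com/multiformats/go-multibase"
	mh "github.com/multiformats/go-multihash"
)

// TokenCID hashes canonical DAG-CBOR bytes into a base58btc CID string.
func TokenCID(dagCborBytes []byte) (string, error) {
	digest, err := mh.Sum(dagCborBytes, mh.SHA2_256, -1)
	if err != nil {
		return "", err
	}
	c := cid.NewCidV1(cid.DagCBOR, digest)
	return c.StringOfBase(multibase.Base58BTC)
}
```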
## Envelope
All UCAN formats MUST use the following envelope format:
| Field | Type | Description |
| --------------------------------- | -------------- | -------------------------------------------------------------- |
| `.0` | `Bytes` | A signature by the Payload's `iss` over the `SigPayload` field |
| `.1` | `SigPayload` | The content that was signed |
| `.1.h` | `VarsigHeader` | The [Varsig] v1 header |
| `.1.ucan/<subspec-tag>@<version>` | `TokenPayload` | The UCAN token payload |
```mermaid
flowchart TD
subgraph Ucan ["UCAN Envelope"]
SignatureBytes["Signature (raw bytes)"]
subgraph SigPayload ["Signature Payload"]
VarsigHeader["Varsig Header"]
subgraph UcanPayload ["Token Payload"]
fields["..."]
end
end
end
```
For example:
```js
[
{
"/": {
bytes:
"7aEDQLYvb3lygk9yvAbk0OZD0q+iF9c3+wpZC4YlFThkiNShcVriobPFr/wl3akjM18VvIv/Zw2LtA4uUmB5m8PWEAU",
},
},
{
h: { "/": { bytes: "NBIFEgEAcQ" } },
"ucan/example@1.0.0-rc.1": {
hello: "world",
},
},
];
```
### Payload
A UCAN's Payload MUST contain at least the following fields:
| Field | Type | Required | Description |
| ------- | ----------------------------------------- | -------- | ----------------------------------------------------------- |
| `iss` | `DID` | Yes | Issuer DID (sender) |
| `aud` | `DID` | Yes | Audience DID (receiver) |
| `sub` | `DID` | Yes | Principal that the chain is about (the [Subject]) |
| `cmd` | `String` | Yes | The [Command] to eventually invoke |
| `args` | `{String : Any}` | Yes | Any [Arguments] that MUST be present in the Invocation |
| `nonce` | `Bytes` | Yes | Nonce |
| `meta` | `{String : Any}` | No | [Meta] (asserted, signed data) — is not delegated authority |
| `nbf` | `Integer` (53-bits[^js-num-size]) | No | "Not before" UTC Unix Timestamp in seconds (valid from) |
| `exp` | `Integer \| Null` (53-bits[^js-num-size]) | Yes | Expiration UTC Unix Timestamp in seconds (valid until) |
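As a non-normative illustration, the payload can be mapped to a Go struct like the one below; the struct tags assume a codec that honors `json`-style tags, and none of this is a normative schema:
```go
// Sketch of the payload fields above; DAG-CBOR encoding is separate.
package ucan

type Payload struct {
	Iss   string         `json:"iss"`            // Issuer DID (sender)
	Aud   string         `json:"aud"`            // Audience DID (receiver)
	Sub   string         `json:"sub"`            // Subject DID the chain is about
	Cmd   string         `json:"cmd"`            // Command to eventually invoke
	Args  map[string]any `json:"args"`           // Arguments required in the Invocation
	Nonce []byte         `json:"nonce"`          // Replay protection
	Meta  map[string]any `json:"meta,omitempty"` // Asserted, signed data (not authority)
	Nbf   *int64         `json:"nbf,omitempty"`  // Valid from (Unix seconds)
	Exp   *int64         `json:"exp"`            // Valid until; null means no expiry
}
```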
# Implementation Recommendations
## Delegation Store
A validator MAY keep a local store of UCANs that it has received. UCANs are immutable but also time-bound so that this store MAY evict expired or revoked UCANs.
This store SHOULD be indexed by CID (content addressing). Multiple indices built on top of this store MAY be used to improve capability search or selection performance.
## Memoized Validation
Aside from revocation, capability validation is idempotent. Marking a CID (or capability index inside that CID) as valid acts as memoization, obviating the need to check the entire structure on every validation. This extends to distinct UCANs that share a proof: if the proof was previously reviewed and is not revoked, it is RECOMMENDED to consider it valid immediately.
Revocation is irreversible. Suppose the validator learns of revocation by UCAN CID. In that case, the UCAN and all of its derivatives in such a cache MUST be marked as invalid, and all validations immediately fail without needing to walk the entire structure.
## Replay Attack Prevention
Replay attack prevention is REQUIRED. Every UCAN token MUST hash to a unique [CIDv1]. Some simple strategies for implementing uniqueness tracking include maintaining a set of previously seen CIDs, or requiring that nonces be monotonically increasing per principal. This MAY be the same structure as a validated UCAN memoization table (if one is implemented).
Maintaining a secondary token expiry index is RECOMMENDED. This enables garbage collection and more efficient search. In cases of very large stores, normal cache performance techniques MAY be used, such as Bloom filters, multi-level caches, and so on.
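A minimal in-memory sketch of CID uniqueness tracking with an expiry-based eviction pass is shown below; a production store would persist this state and likely layer the cache techniques mentioned above:
```go
// Sketch: seen-CID tracking keyed by CID with token expiry for GC.
package ucan

import "time"

type SeenStore struct {
	seen map[string]time.Time // CID -> token expiry
}

func NewSeenStore() *SeenStore {
	return &SeenStore{seen: map[string]time.Time{}}
}

// Check records the CID and reports whether it was fresh (not a replay).
func (s *SeenStore) Check(cid string, exp time.Time) bool {
	if _, dup := s.seen[cid]; dup {
		return false // replay: this CID was already executed
	}
	s.seen[cid] = exp
	return true
}

// GC evicts entries whose tokens have expired and can no longer replay.
func (s *SeenStore) GC(now time.Time) {
	for cid, exp := range s.seen {
		if now.After(exp) {
			delete(s.seen, cid)
		}
	}
}
```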
## Beyond Single System Image
> As we continue to increase the number of globally connected devices, we must embrace a design that considers every single member in the system as the primary site for the data that it generates. It is completely impractical that we can look at a single, or a small number, of globally distributed data centers as the primary site for all global information that we desire to perform computations with.
>
> —[Meiklejohn], [A Certain Tendency Of The Database Community]
Unlike many authorization systems where a service controls access to resources in their care, location-independent, offline, and leaderless resources require control to live with the user. Therefore, the same data MAY be used across many applications, data stores, and users. Since they don't have a single location, applying UCAN to [RSM]s and [CRDT]s MAY be modelled by lifting the requirement that the Executor be the Subject.
Ultimately this comes down to a question of push vs pull. In push, the subject MUST be the specific site being pushed to ("I command you to apply the following updates to your state").
Pull is the broad class of situations where an Invoker doesn't require that a particular replica apply its state. Applying a change to a local CRDT replica and maintaining a UCAN invocation log is a valid update to "the CRDT": a version of the CRDT Subject exists locally even if the Subject's private key is not present. Gossiping these changes among agents allows each to apply changes that it becomes aware of. Thanks to the invocation log (or equivalent integrated directly into the CRDT), provenance of authority is made transparent.
```mermaid
sequenceDiagram
participant CRDT as Initial Grow-Only Set (CRDT)
actor Alice
actor Bob
actor Carol
autonumber
Note over CRDT, Bob: Setup
CRDT -->> Alice: delegate(CRDT_ID, merge)
CRDT -->> Bob: delegate(CRDT_ID, merge)
Note over Bob, Carol: Bob Invites Carol
Bob -->> Carol: delegate(CRDT_ID, merge)
Note over Alice, Carol: Direct P2P Gossip
Carol ->> Bob: invoke(CRDT_ID, merge, {"Carrot"}, proof: [➋,❸])
    Alice ->> Carol: invoke(CRDT_ID, merge, {"Apple"}, proof: [➊])
Bob ->> Alice: invoke(CRDT_ID, merge, {"Banana", "Carrot"}, proof: [➋])
```
## Wrapping Existing Systems
In the RECOMMENDED scenario, the agent controlling a resource has a unique reference to it. This is always possible in a system that has adopted capabilities end-to-end.
Interacting with existing systems MAY require relying on ambient authority contained in an ACL, non-unique reference, or other authorization logic. These cases are still compatible with UCAN, but the security guarantees are weaker since 1. the surface area is larger, and 2. part of the auth system lives outside UCAN.
```mermaid
sequenceDiagram
participant Database
participant ACL as External Auth System
actor DBAgent
actor Alice
actor Bob
Note over ACL, DBAgent: Setup
DBAgent ->> ACL: signup(DBAgent)
ACL ->> ACL: register(DBAgent)
autonumber 1
Note over DBAgent, Bob: Delegation
DBAgent -->> Alice: delegate(DBAgent, write)
Alice -->> Bob: delegate(DBAgent, write)
Note over Database, Bob: Invocation
Bob ->>+ DBAgent: invoke(DBAgent, [write, key, value], proof: [➊,➋])
critical External System
DBAgent ->> ACL: getToken(write, key, AuthGrant)
ACL ->> DBAgent: AccessToken
DBAgent ->> Database: request(write, value, AccessToken)
Database ->> DBAgent: ACK
end
DBAgent ->>- Bob: ACK
```
# FAQ
## What prevents an unauthorized party from using an intercepted UCAN?
UCANs always contain information about the sender and receiver. A UCAN is signed by the sender (the `iss` field DID) and can only be created by an agent in possession of the relevant private key. The recipient (the `aud` field DID) is required to check that the field matches their DID. These two checks together secure the certificate against use by an unauthorized party. [UCAN Invocations][invocation] prevent use by an unauthorized party by signing over a request to use the capability granted in a delegation chain.
## What prevents replay attacks on the invocation use case?
All UCAN Invocations MUST have a unique CID. The executing agent MUST check this CID for uniqueness against a local store of unexpired UCAN hashes.
This is not a concern when simply delegating since receiving a delegation is idempotent.
## Is UCAN secure against person-in-the-middle attacks?
_UCAN does not have any special protection against person-in-the-middle (PITM) attacks._
If a PITM attack were successfully performed on a UCAN delegation, the proof chain would contain the attacker's DID(s). It is possible to detect this scenario and revoke the relevant UCAN, but this requires special inspection of the topmost `iss` field to check whether it is the expected DID. Therefore, it is strongly RECOMMENDED to delegate UCANs only to agents that are both trusted and authenticated, and only over secure channels.
## Can my implementation support more cryptographic algorithms?
It is possible to use other algorithms, but doing so limits interoperability with the broader UCAN ecosystem. This is thus considered "off spec" (i.e. non-interoperable). If you choose to extend UCAN with additional algorithms, you MUST include this metadata in the (self-describing) [Varsig] header.
# Related Work and Prior Art
[SPKI/SDSI] is closely related to UCAN. A different encoding format is used, and some details vary (such as a delegation-locking bit), but the core idea and general usage pattern are very close. UCAN can be seen as making these ideas more palatable to a modern audience and adding a few features such as content IDs that were less widespread at the time SPKI/SDSI were written.
[ZCAP-LD] is closely related to UCAN. The primary differences are in formatting, addressing by URL instead of CID, the mechanism of separating invocation from authorization, and single versus multiple proofs.
[CACAO] is a translation of many of these ideas to a cross-blockchain delegated bearer token model. It contains the same basic concepts as UCAN delegation, but is aimed at small messages and identities anchored in mutable documents rooted on a blockchain, and it lacks the ability to subdelegate capabilities.
[Local-First Auth] is a non-certificate-based approach, instead relying on a CRDT to build up a list of group members, devices, and roles. It has a friendly invitation mechanism based on a [Seitan token exchange]. It is also straightforward to see which users have access to what, avoiding the confinement problem seen in many decentralized auth systems.
[Macaroon] is a MAC-based capability and cookie system aimed at distributing authority across services in a trusted network (typically in the context of a Cloud). By not relying on asymmetric signatures, Macaroons achieve excellent space savings and performance, given that the MAC can be checked against the relevant services during discharge. The authority is rooted in an originating server rather than with an end-user.
[Biscuit] uses Datalog to describe capabilities. It has a specialized format but is otherwise in line with UCAN.
[Verifiable credentials] are a solution for data about people or organizations. However, they are aimed at a related-but-distinct problem: asserting attributes about the holder of a DID, including things like work history, age, and membership.
# Acknowledgments
Thank you to [Brendan O'Brien] for real-world feedback, technical collaboration, and implementing the first Golang UCAN library.
Thank you [Blaine Cook] for the real-world feedback, ideas on future features, and lessons from other auth standards.
Many thanks to [Hugo Dias], [Mikael Rogers], and the entire DAG House team for the real world feedback, and finding inventive new use cases.
Thanks to [Hannah Howard] and [Alan Shaw] at [Storacha] for their team's feedback from real world use cases.
Many thanks to [Brian Ginsburg] and [Steven Vandevelde] for their many copy edits, feedback from real world usage, maintenance of the TypeScript implementation, and tools such as [ucan.xyz].
Many thanks to [Christopher Joel] for his real-world feedback, raising many pragmatic considerations, and the Rust implementation and related crates.
Many thanks to [Christine Lemmer-Webber] for her handwritten(!) feedback on the design of UCAN, spearheading the [OCapN] initiative, and her related work on [ZCAP-LD].
Many thanks to [Alan Karp] for sharing his vast experience with capability-based authorization, patterns, and many right words for us to search for.
Thanks to [Benjamin Goering] for the many community threads and connections to [W3C] standards.
Thanks to [Juan Caballero] for the numerous questions, clarifications, and general advice on putting together a comprehensible spec.
Thank you [Dan Finlay] for being sufficiently passionate about [OCAP] that we realized that capability systems had a real chance of adoption in an ACL-dominated world.
Thanks to [Peter van Hardenberg][PvH] and [Martin Kleppmann] of [Ink & Switch] for conversations exploring options for access control on CRDTs and [local-first] applications.
Thanks to the entire [SPKI WG][SPKI/SDSI] for their closely related pioneering work.
We want to especially recognize [Mark Miller] for his numerous contributions to the field of distributed auth, programming languages, and networked security writ large.
<!-- Footnotes -->
[^js-num-size]: JavaScript has a single numeric type ([`Number`][JS Number]) for both integers and floats. This representation is defined as a [IEEE-754] double-precision floating point number, which has a 53-bit significand.
[^pcec]: To be precise, this is a [PC/EC][PACELC] system, which is a critical trade-off for many systems. UCAN can be used to model both PC/EC and PA/EL, but is most typically PC/EL.
<!-- Internal Links -->
[Command]: #command
[Cryptosuite]: #cryptosuite
[overcoming SSI]: #beyond-single-system-image
[sub-specifications]: #sub-specifications
[wrapping existing systems]: #wrapping-existing-systems
<!-- External Links -->
[IEEE-754]: https://ieeexplore.ieee.org/document/8766229
[A Certain Tendency Of The Database Community]: https://arxiv.org/pdf/1510.08473.pdf
[ACL]: https://en.wikipedia.org/wiki/Access-control_list
[Alan Karp]: https://github.com/alanhkarp
[Alan Kay]: https://en.wikipedia.org/wiki/Alan_Kay
[Alan Shaw]: https://github.com/alanshaw
[BCP 14]: https://www.rfc-editor.org/info/bcp14
[BLAKE3]: https://github.com/BLAKE3-team/BLAKE3
[Benjamin Goering]: https://github.com/gobengo
[Biscuit]: https://github.com/biscuit-auth/biscuit/
[Blaine Cook]: https://github.com/blaine
[Bluesky]: https://blueskyweb.xyz/
[Brendan O'Brien]: https://github.com/b5
[Brian Ginsburg]: https://github.com/bgins
[Brooklyn Zelenka]: https://github.com/expede
[CACAO]: https://blog.ceramic.network/capability-based-data-security-on-ceramic/
[CIDv1]: https://docs.ipfs.io/concepts/content-addressing/#identifier-formats
[CRDT]: https://en.wikipedia.org/wiki/Conflict-free_replicated_data_type
[Capability Myths Demolished]: https://srl.cs.jhu.edu/pubs/SRL2003-02.pdf
[Christine Lemmer-Webber]: https://github.com/cwebber
[Christopher Joel]: https://github.com/cdata
[Code Mesh 2016]: https://www.codemesh.io/codemesh2016
[DAG-CBOR]: https://ipld.io/specs/codecs/dag-cbor/spec/
[DAG-JSON]: https://ipld.io/specs/codecs/dag-json/spec/
[DID fragment]: https://www.w3.org/TR/did-core/#fragment
[DID path]: https://www.w3.org/TR/did-core/#path
[DID subject]: https://www.w3.org/TR/did-core/#dfn-did-subjects
[DID]: https://www.w3.org/TR/did-core/
[Dan Finlay]: https://github.com/danfinlay
[Daniel Holmgren]: https://github.com/dholms
[ECDSA security]: https://en.wikipedia.org/wiki/Elliptic_Curve_Digital_Signature_Algorithm#Security
[Ed25519]: https://en.wikipedia.org/wiki/EdDSA#Ed25519
[EdDSA]: https://datatracker.ietf.org/doc/html/rfc8032#section-5.1
[Email about SPKI]: https://web.archive.org/web/20140724054706/http://wiki.erights.org/wiki/Capability-based_Active_Invocation_Certificates
[FIDO]: https://fidoalliance.org/what-is-fido/
[Fission]: https://fission.codes
[GUID]: https://en.wikipedia.org/wiki/Universally_unique_identifier
[Hannah Howard]: https://github.com/hannahhoward
[Hugo Dias]: https://github.com/hugomrdias
[IPLD]: https://ipld.io/
[Ink & Switch]: https://www.inkandswitch.com/
[Inversion of control]: https://en.wikipedia.org/wiki/Inversion_of_control
[Irakli Gozalishvili]: https://github.com/Gozala
[JWT]: https://www.rfc-editor.org/rfc/rfc7519
[Joe Armstrong]: https://en.wikipedia.org/wiki/Joe_Armstrong_(programmer)
[Juan Caballero]: https://github.com/bumblefudge
[Local-First Auth]: https://github.com/local-first-web/auth
[Macaroon]: https://storage.googleapis.com/pub-tools-public-publication-data/pdf/41892.pdf
[Mark Miller]: https://github.com/erights
[Martin Kleppmann]: https://martin.kleppmann.com/
[Meiklejohn]: https://christophermeiklejohn.com/
[Mikael Rogers]: https://github.com/mikeal/
[Multibase]: https://github.com/multiformats/multibase
[Multicodec]: https://github.com/multiformats/multicodec
[Multics]: https://en.wikipedia.org/wiki/Multics
[Multihash]: https://www.multiformats.io/multihash/
[OCAP]: http://erights.org/elib/capability/index.html
[OCapN]: https://github.com/ocapn/ocapn
[P-256]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-4.pdf#page=111
[PACELC]: https://en.wikipedia.org/wiki/PACELC_theorem
[Philipp Krüger]: https://github.com/matheus23
[PoLA]: https://en.wikipedia.org/wiki/Principle_of_least_privilege
[Protocol Labs]: https://protocol.ai/
[PvH]: https://www.pvh.ca
[RBAC]: https://en.wikipedia.org/wiki/Role-based_access_control
[RFC 2119]: https://datatracker.ietf.org/doc/html/rfc2119
[RFC 3339]: https://www.rfc-editor.org/rfc/rfc3339
[RFC 8037]: https://datatracker.ietf.org/doc/html/rfc8037
[RSM]: https://en.wikipedia.org/wiki/State_machine_replication
[Robust Composition]: http://www.erights.org/talks/thesis/markm-thesis.pdf
[SHA-256]: https://en.wikipedia.org/wiki/SHA-2
[SPKI/SDSI]: https://datatracker.ietf.org/wg/spki/about/
[SPKI]: https://theworld.com/~cme/html/spki.html
[Seitan token exchange]: https://book.keybase.io/docs/teams/seitan
[Steven Vandevelde]: https://github.com/icidasset
[Storacha]: https://storacha.network/
[The Structure of Authority]: http://erights.org/talks/no-sep/secnotsep.pdf
[The computer revolution hasn't happened yet]: https://www.youtube.com/watch?v=oKg1hTOQXoY
[UCAN Promise]: https://github.com/ucan-wg/promise
[URI]: https://www.rfc-editor.org/rfc/rfc3986
[Varsig]: https://github.com/ChainAgnostic/varsig
[Verifiable credentials]: https://www.w3.org/2017/vc/WG/
[W3C]: https://www.w3.org/
[WebCrypto API]: https://developer.mozilla.org/en-US/docs/Web/API/Web_Crypto_API
[Witchcraft Software]: https://github.com/expede
[ZCAP-LD]: https://w3c-ccg.github.io/zcap-spec/
[`base58btc`]: https://github.com/multiformats/multibase/blob/master/multibase.csv#L21
[`did:key`]: https://w3c-ccg.github.io/did-method-key/
[`secp256k1`]: https://en.bitcoin.it/wiki/Secp256k1
[browser api crypto key]: https://developer.mozilla.org/en-US/docs/Web/API/CryptoKey
[capabilities]: https://en.wikipedia.org/wiki/Object-capability_model
[caps as keys]: http://www.erights.org/elib/capability/duals/myths.html#caps-as-keys
[certificate capability model]: https://web.archive.org/web/20140724054706/http://wiki.erights.org/wiki/Capability-based_Active_Invocation_Certificates
[confinement]: http://www.erights.org/elib/capability/dist-confine.html
[confused deputy problem]: https://en.wikipedia.org/wiki/Confused_deputy_problem
[constructive semantics]: https://en.wikipedia.org/wiki/Intuitionistic_logic
[content addressable storage]: https://en.wikipedia.org/wiki/Content-addressable_storage
[content addressing]: https://en.wikipedia.org/wiki/Content-addressable_storage
[dag-json multicodec]: https://github.com/multiformats/multicodec/blob/master/table.csv#L104
[delegation]: https://github.com/ucan-wg/delegation
[fail-safe]: https://en.wikipedia.org/wiki/Fail-safe
[invocation]: https://github.com/ucan-wg/invocation
[local-first]: https://www.inkandswitch.com/local-first/
[number zero]: https://n0.computer/
[passkey]: https://www.passkeys.com/
[promise]: https://github.com/ucan-wg/promise
[raw data multicodec]: https://github.com/multiformats/multicodec/blob/a03169371c0a4aec0083febc996c38c3846a0914/table.csv?plain=1#L41
[revocation]: https://github.com/ucan-wg/revocation
[secure hardware enclave]: https://support.apple.com/en-ca/guide/security/sec59b0b31ff
[spki rfc]: https://www.rfc-editor.org/rfc/rfc2693.html
[time definition]: https://en.wikipedia.org/wiki/Temporal_database
[trustless]: https://blueskyweb.xyz/blog/3-6-2022-a-self-authenticating-social-protocol
[ucan.xyz]: https://ucan.xyz

View File

@ -0,0 +1,105 @@
You are an expert in Cosmos SDK data modeling and state management, specializing in building efficient and scalable data models using the Cosmos SDK ORM system with Protocol Buffers.
Key Principles:
- Design type-safe state management systems
- Create efficient protobuf-based data models
- Implement proper table structures and indexes
- Follow Cosmos SDK state management best practices
- Design for light client compatibility
- Implement proper genesis import/export
- Follow protobuf naming conventions
Data Modeling Best Practices:
- Define clear table structures in .proto files
- Use appropriate primary key strategies
- Implement proper secondary indexes
- Follow database normalization principles (1NF+)
- Avoid repeated fields in tables
- Design for future extensibility
- Consider state layout impact on clients
Schema Design Patterns:
- Use unique table IDs within .proto files
- Implement proper field numbering
- Design efficient multipart keys
- Use appropriate field types
- Consider index performance implications
- Implement proper singleton patterns
- Design for automatic query services
State Management:
- Follow Cosmos SDK store patterns
- Implement proper prefix handling
- Design efficient range queries
- Use appropriate encoding strategies
- Handle state migrations properly
- Implement proper genesis handling
- Consider light client proof requirements
Error Handling and Validation:
- Implement proper input validation
- Use appropriate error types
- Handle state errors appropriately
- Implement proper debugging
- Use context appropriately
- Implement proper logging
- Handle concurrent access
Performance Optimization:
- Design efficient key encodings
- Optimize storage space usage
- Implement efficient queries
- Use appropriate index strategies
- Consider state growth implications
- Monitor performance metrics
- Design for scalability
Dependencies:
- cosmos/orm/v1/orm.proto
- `google.golang.org/protobuf`
- cosmos-sdk/store
- cosmos-sdk/types
- tendermint/types
- proper logging framework
Key Conventions:
1. Use consistent protobuf naming
2. Implement proper documentation
3. Follow schema versioning practices
4. Use proper table ID management
5. Implement proper testing strategies
Example Table Structure:
```protobuf
message Balance {
option (cosmos.orm.v1.table) = {
id: 1
primary_key: { fields: "account,denom" }
index: { id: 1, fields: "denom" }
};
bytes account = 1;
string denom = 2;
uint64 amount = 3;
}
message Params {
option (cosmos.orm.v1.singleton) = {
id: 2
};
google.protobuf.Duration voting_period = 1;
uint64 min_threshold = 2;
}
```
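Example Usage Sketch (hedged: the `BalanceTable` accessors below follow the standard cosmos-sdk ORM codegen conventions but are assumed, not copied from real generated output; the minimal types are declared inline so the sketch stands alone):
```go
package keeper

import "context"

// Assumed shapes of the ORM-generated types for the Balance table above.
type Balance struct {
	Account []byte
	Denom   string
	Amount  uint64
}

type BalanceTable interface {
	Get(ctx context.Context, account []byte, denom string) (*Balance, error)
	Save(ctx context.Context, balance *Balance) error
}

// creditBalance adds amt to an (account, denom) row, creating it if absent.
func creditBalance(ctx context.Context, balances BalanceTable, acct []byte, denom string, amt uint64) error {
	bal, err := balances.Get(ctx, acct, denom) // lookup by primary key
	if err != nil {
		// Sketch: treat lookup failure as "no row yet"; real code should
		// distinguish not-found from other storage errors.
		bal = &Balance{Account: acct, Denom: denom}
	}
	bal.Amount += amt
	return balances.Save(ctx, bal) // insert-or-update by primary key
}
```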
Refer to the official Cosmos SDK documentation and ORM specifications for best practices and up-to-date APIs.

88
.github/aider/prompts/data-modeler.md vendored Normal file
View File

@ -0,0 +1,88 @@
You are an expert in Go data modeling and PostgreSQL database design, specializing in building efficient and scalable data models using modern ORMs like GORM and SQLBoiler.
Key Principles:
- Write idiomatic Go code following standard Go conventions
- Design clean and maintainable database schemas
- Implement proper relationships and constraints
- Use appropriate indexes for query optimization
- Follow database normalization principles
- Implement proper error handling and validation
- Use meaningful struct tags for ORM mapping
Data Modeling Best Practices:
- Use appropriate Go types for database columns
- Implement proper foreign key relationships
- Design for data integrity and consistency
- Consider soft deletes where appropriate
- Use composite indexes strategically
- Implement proper timestamps for auditing
- Handle NULL values appropriately with pointers
ORM Patterns:
- Use GORM hooks for complex operations
- Implement proper model validation
- Use transactions for atomic operations
- Implement proper eager loading
- Use batch operations for better performance
- Handle migrations systematically
- Implement proper model scopes
Database Design:
- Follow PostgreSQL best practices
- Use appropriate column types
- Implement proper constraints
- Design efficient indexes
- Use JSONB for flexible data when needed
- Implement proper partitioning strategies
- Consider materialized views for complex queries
Error Handling and Validation:
- Implement proper input validation
- Use custom error types
- Handle database errors appropriately
- Implement retry mechanisms
- Use context for timeouts
- Implement proper logging
- Handle concurrent access
Performance Optimization:
- Use appropriate batch sizes
- Implement connection pooling
- Use prepared statements
- Optimize query patterns
- Use appropriate caching strategies
- Monitor query performance
- Use explain analyze for optimization
Dependencies:
- GORM or SQLBoiler
- pq (PostgreSQL driver)
- validator
- migrate
- sqlx (for raw SQL when needed)
- zap or logrus for logging
Key Conventions:
1. Use consistent naming conventions
2. Implement proper documentation
3. Follow database migration best practices
4. Use version control for schema changes
5. Implement proper testing strategies
Example Model Structure:
```go
type User struct {
ID uint `gorm:"primarykey"`
CreatedAt time.Time
UpdatedAt time.Time
DeletedAt gorm.DeletedAt `gorm:"index"`
Name string `gorm:"type:varchar(100);not null"`
Email string `gorm:"type:varchar(100);uniqueIndex;not null"`
Profile Profile
Orders []Order
}
```
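Example Usage Sketch (builds on the `User` model above; shows GORM v2 eager loading and transactions):
```go
package models

import "gorm.io/gorm"

// LoadUserWithOrders eager-loads the Orders association to avoid N+1 queries.
func LoadUserWithOrders(db *gorm.DB, id uint) (*User, error) {
	var user User
	if err := db.Preload("Orders").First(&user, id).Error; err != nil {
		return nil, err
	}
	return &user, nil
}

// CreateUser wraps the insert in a transaction; GORM rolls back on error.
func CreateUser(db *gorm.DB, u *User) error {
	return db.Transaction(func(tx *gorm.DB) error {
		return tx.Create(u).Error
	})
}
```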
Refer to the official documentation of GORM, PostgreSQL, and Go for best practices and up-to-date APIs.

132
.github/aider/prompts/sonr-tech-lead.md vendored Normal file
View File

@ -0,0 +1,132 @@
You are a technical lead specializing in decentralized identity systems and security architecture, with expertise in W3C standards, Cosmos SDK, and blockchain security patterns.
Core Responsibilities:
- Ensure compliance with W3C DID and VC specifications
- Implement secure cryptographic practices
- Design robust authentication flows
- Maintain data privacy and protection
- Guide secure state management
- Enforce access control patterns
- Oversee security testing
Security Standards:
- W3C DID Core 1.0
- W3C Verifiable Credentials
- W3C WebAuthn Level 2
- OAuth 2.0 and OpenID Connect
- JSON Web Signatures (JWS)
- JSON Web Encryption (JWE)
- Decentralized Key Management (DKMS)
Architecture Patterns:
- Secure DID Resolution
- Verifiable Credential Issuance
- DWN Access Control
- Service Authentication
- State Validation
- Key Management
- Privacy-Preserving Protocols
Implementation Guidelines:
- Use standardized cryptographic libraries
- Implement proper key derivation
- Follow secure encoding practices
- Validate all inputs thoroughly
- Handle errors securely
- Log security events properly
- Implement rate limiting
State Management Security:
- Validate state transitions
- Implement proper access control
- Use secure storage patterns
- Handle sensitive data properly
- Implement proper backup strategies
- Maintain state integrity
- Monitor state changes
Authentication & Authorization:
- Implement proper DID authentication
- Use secure credential validation
- Follow OAuth 2.0 best practices
- Implement proper session management
- Use secure token handling
- Implement proper key rotation
- Monitor authentication attempts
Data Protection:
- Encrypt sensitive data
- Implement proper key management
- Use secure storage solutions
- Follow data minimization principles
- Implement proper backup strategies
- Handle data deletion securely
- Monitor data access
Security Testing:
- Implement security unit tests
- Perform integration testing
- Conduct penetration testing
- Monitor security metrics
- Review security logs
- Conduct threat modeling
- Maintain security documentation
Example Security Patterns:
```go
// Secure DID Resolution
func ResolveDID(did string) (*DIDDocument, error) {
// Validate DID format
if !ValidateDIDFormat(did) {
return nil, ErrInvalidDID
}
// Resolve with retry and timeout
ctx, cancel := context.WithTimeout(context.Background(), resolveTimeout)
defer cancel()
doc, err := resolver.ResolveWithContext(ctx, did)
if err != nil {
return nil, fmt.Errorf("resolution failed: %w", err)
}
// Validate document structure
if err := ValidateDIDDocument(doc); err != nil {
return nil, fmt.Errorf("invalid document: %w", err)
}
return doc, nil
}
// Secure Credential Verification
func VerifyCredential(vc *VerifiableCredential) error {
// Check expiration
if vc.IsExpired() {
return ErrCredentialExpired
}
// Verify proof
if err := vc.VerifyProof(trustRegistry); err != nil {
return fmt.Errorf("invalid proof: %w", err)
}
// Verify status
if err := vc.CheckRevocationStatus(); err != nil {
return fmt.Errorf("revocation check failed: %w", err)
}
return nil
}
```
Security Checklist:
1. All DIDs follow W3C specification
2. Credentials implement proper proofs
3. Keys use proper derivation/rotation
4. State changes are validated
5. Access control is enforced
6. Data is properly encrypted
7. Logging captures security events
Refer to W3C specifications, Cosmos SDK security documentation, and blockchain security best practices for detailed implementation guidance.

7
.github/scripts/make_docs.sh vendored Normal file
View File

@ -0,0 +1,7 @@
#!/usr/bin/env bash
set -e
ROOT_DIR=$(git rev-parse --show-toplevel)

39
.github/scripts/upload_cdn.sh vendored Executable file
View File

@ -0,0 +1,39 @@
#!/usr/bin/env bash
set -e
ROOT_DIR=$(git rev-parse --show-toplevel)
# Package the PKL projects
bunx pkl project package $ROOT_DIR/pkl/*/
# Process each directory in .out
for dir in .out/*/; do
# Get the folder name and version
folder=$(basename "$dir")
version=$(echo "$folder" | grep -o '@.*' | sed 's/@//')
new_folder=$(echo "$folder" | sed 's/@[0-9.]*$//')
# Create new directory without version
mkdir -p ".out/$new_folder/$version"
# Copy contents to versioned subdirectory
cp -r "$dir"* ".out/$new_folder/$version/"
# Find and copy only .pkl files from the original package
pkg_dir="$ROOT_DIR/pkl/$new_folder"
if [ -d "$pkg_dir" ]; then
# Copy only .pkl files to version directory
find "$pkg_dir" -name "*.pkl" -exec cp {} ".out/$new_folder/$version/" \;
fi
# Remove old versioned directory
rm -rf "$dir"
# Upload to R2 with new structure
rclone copy ".out/$new_folder" "r2:pkljar/$new_folder"
done
# Cleanup .out directory
rm -rf .out

View File

@ -9,7 +9,7 @@ permissions:
issues: write
jobs:
buf_push:
buf_push_core:
name: Publish to buf.build/onsonr/sonr
runs-on: ubuntu-latest
steps:
@ -24,40 +24,60 @@ jobs:
input: proto
buf_token: ${{ secrets.BUF_TOKEN }}
#
# upload_configs:
# runs-on: ubuntu-latest
# name: Publish to configs.sonr.id
# steps:
# - name: checkout
# uses: actions/checkout@v4
# - name: Upload to R2
# continue-on-error: true
# uses: ryand56/r2-upload-action@latest
# with:
# r2-account-id: ${{ secrets.R2_ACCOUNT_ID }}
# r2-access-key-id: ${{ secrets.R2_ACCESS_KEY_ID }}
# r2-secret-access-key: ${{ secrets.R2_SECRET_ACCESS_KEY }}
# r2-bucket: configs
# source-dir: config
# destination-dir: ./pkl
#
upload_pkl:
buf_push_thirdparty:
name: Publish to buf.build/onsonr/thirdparty
runs-on: ubuntu-latest
name: Publish to pkl.sh
steps:
# Run `git checkout`
- uses: actions/checkout@v3
# Install the `buf` CLI
- uses: bufbuild/buf-setup-action@v1
# Push only the Input in `proto` to the BSR
- uses: bufbuild/buf-push-action@v1
continue-on-error: true
with:
input: third_party/proto
buf_token: ${{ secrets.BUF_TOKEN }}
upload_motr_dwn:
runs-on: ubuntu-latest
name: Publish to configs.sonr.id
steps:
- name: checkout
uses: actions/checkout@v4
- name: Upload to R2
- uses: actions/setup-go@v5
with:
go-version: "1.23"
check-latest: true
- name: Setup go-task / task / taskfile
uses: rnorton5432/setup-task@v1
- name: Build DWN WASM
run: task dwn:build
- name: Upload WASM to R2
continue-on-error: true
uses: ryand56/r2-upload-action@latest
with:
r2-account-id: ${{ secrets.R2_ACCOUNT_ID }}
r2-access-key-id: ${{ secrets.R2_ACCESS_KEY_ID }}
r2-secret-access-key: ${{ secrets.R2_SECRET_ACCESS_KEY }}
r2-bucket: pkljar
source-dir: pkl
destination-dir: .
r2-bucket: nebula
source-dir: ./build/app.wasm
destination-dir: wasm
- name: Upload SW JS to R2
continue-on-error: true
uses: ryand56/r2-upload-action@latest
with:
r2-account-id: ${{ secrets.R2_ACCOUNT_ID }}
r2-access-key-id: ${{ secrets.R2_ACCESS_KEY_ID }}
r2-secret-access-key: ${{ secrets.R2_SECRET_ACCESS_KEY }}
r2-bucket: nebula
source-dir: ./pkg/motr/static/sw.js
destination-dir: js
upload_nebula_cdn:
runs-on: ubuntu-latest

19
.gitignore vendored
View File

@ -1,5 +1,8 @@
# Binaries
.data
schemas
*.db
tools-stamp
*.exe
*.exe~
*.dll
@ -10,14 +13,21 @@
.session.vim
aof*
dist
**/.haptic
static
pkg/webapp/dist
.agent
# Test binary
*.test
.devon*
**/.DS_Store
.task
.wrangler
# Output of the go coverage tool
*.out
tmp
# Exclude embedded files
!internal/files/dist
@ -71,7 +81,9 @@ deploy/**/data
x/.DS_Store
.aider*
buildenv*
nebula/node_modules
node_modules
cmd/gateway/node_modules
pkg/nebula/node_modules
mprocs.yaml
build
@ -79,3 +91,6 @@ sonr.wiki
!devbox.lock
!buf.lock
.air.toml

View File

@ -94,7 +94,6 @@ endif
install: go.sum
go install -mod=readonly $(BUILD_FLAGS) ./cmd/sonrd
go install -mod=readonly $(BUILD_FLAGS) ./cmd/hway
########################################
### Tools & dependencies
@ -113,7 +112,11 @@ draw-deps:
@goviz -i ./cmd/sonrd -d 2 | dot -Tpng -o dependency-graph.png
clean:
rm -rf pkg/nebula/node_modules
rm -rf .aider*
rm -rf static
rm -rf .out
rm -rf build
rm -rf hway.db
rm -rf snapcraft-local.yaml build/
distclean: clean
@ -263,6 +266,9 @@ ictest-tokenfactory:
### testnet ###
###############################################################################
setup-ipfs:
./scripts/ipfs_config.sh
setup-testnet: mod-tidy is-localic-installed install local-image set-testnet-configs setup-testnet-keys
# Run this before testnet keys are added
@ -289,26 +295,48 @@ sh-testnet: mod-tidy
.PHONY: setup-testnet set-testnet-configs testnet testnet-basic sh-testnet
###############################################################################
### custom generation ###
### generation ###
###############################################################################
.PHONY: templ-gen pkl-gen
assets-gen:
@echo "(assets) Generating gateway cloudflare workers assets"
go run github.com/syumai/workers/cmd/workers-assets-gen -mode=go -o ./cmd/hway/build
templ-gen:
@echo "(templ) Generating templ files"
templ generate
.PHONY: pkl-gen tailwind-gen templ-gen
pkl-gen:
@echo "(pkl) Building PKL"
go run github.com/apple/pkl-go/cmd/pkl-gen-go ./pkl/DWN.pkl
go run github.com/apple/pkl-go/cmd/pkl-gen-go ./pkl/ORM.pkl
go run github.com/apple/pkl-go/cmd/pkl-gen-go ./pkl/Txns.pkl
go install github.com/apple/pkl-go/cmd/pkl-gen-go@latest
pkl-gen-go pkl/sonr.motr/ATN.pkl
pkl-gen-go pkl/sonr.hway/Env.pkl
pkl-gen-go pkl/sonr.motr/DWN.pkl
pkl-gen-go pkl/sonr.hway/ORM.pkl
templ-gen:
@go install github.com/a-h/templ/cmd/templ@latest
templ generate
###############################################################################
### custom builds ###
###############################################################################
.PHONY: motr-build hway-build hway-serve
motr-build:
GOOS=js GOARCH=wasm go build -o static/wasm/app.wasm ./cmd/motr/main.go
hway-build: templ-gen
go build -o build/hway ./cmd/hway/main.go
hway-serve: hway-build
./build/hway
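Taken together, these targets give a local loop of roughly `make motr-build && make hway-serve`: build the WASM payload, then build and run the gateway binary (an illustrative invocation, not a documented workflow).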
###############################################################################
### help ###
###############################################################################
.PHONY: deploy-buf deploy-pkl
deploy-buf:
cd ./proto && bunx buf dep update && bunx buf build && bunx buf push
deploy-pkl:
sh ./.github/scripts/upload_cdn.sh
###############################################################################
### help ###


@ -1,42 +0,0 @@
version: "3"
vars:
ROOT_DIR:
sh: pwd
tasks:
hway:build:
dir: cmd/hway
env:
GOOS: js
GOARCH: wasm
cmds:
- go build -o build/app.wasm main.go
hway:dev:
dir: cmd/hway
cmds:
- task: nebula:build
- bunx wrangler dev
hway:deploy:
dir: cmd/hway
cmds:
- task: nebula:build
- bunx wrangler deploy
motr:build:
dir: internal/dwn
env:
GOOS: js
GOARCH: wasm
cmds:
- go build -o app.wasm wasm/main.go
nebula:build:
dir: pkg/nebula
cmds:
- bun install
- bun run deps.mjs
- bunx tailwindcss -i ./global/styles/globals.css -o ./assets/css/styles.css
- templ generate
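This deletes the repository Taskfile outright; the WASM and frontend builds it drove now appear to live in the Makefile targets above (`motr-build`, `hway-build`, `assets-gen`, `templ-gen`).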


@ -104,9 +104,9 @@ func (x *fastReflection_Module) Has(fd protoreflect.FieldDescriptor) bool {
switch fd.FullName() {
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: onsonr.sonr.did.module.v1.Module"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: did.module.v1.Module"))
}
panic(fmt.Errorf("message onsonr.sonr.did.module.v1.Module does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message did.module.v1.Module does not contain field %s", fd.FullName()))
}
}
@ -120,9 +120,9 @@ func (x *fastReflection_Module) Clear(fd protoreflect.FieldDescriptor) {
switch fd.FullName() {
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: onsonr.sonr.did.module.v1.Module"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: did.module.v1.Module"))
}
panic(fmt.Errorf("message onsonr.sonr.did.module.v1.Module does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message did.module.v1.Module does not contain field %s", fd.FullName()))
}
}
@ -136,9 +136,9 @@ func (x *fastReflection_Module) Get(descriptor protoreflect.FieldDescriptor) pro
switch descriptor.FullName() {
default:
if descriptor.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: onsonr.sonr.did.module.v1.Module"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: did.module.v1.Module"))
}
panic(fmt.Errorf("message onsonr.sonr.did.module.v1.Module does not contain field %s", descriptor.FullName()))
panic(fmt.Errorf("message did.module.v1.Module does not contain field %s", descriptor.FullName()))
}
}
@ -156,9 +156,9 @@ func (x *fastReflection_Module) Set(fd protoreflect.FieldDescriptor, value proto
switch fd.FullName() {
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: onsonr.sonr.did.module.v1.Module"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: did.module.v1.Module"))
}
panic(fmt.Errorf("message onsonr.sonr.did.module.v1.Module does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message did.module.v1.Module does not contain field %s", fd.FullName()))
}
}
@ -176,9 +176,9 @@ func (x *fastReflection_Module) Mutable(fd protoreflect.FieldDescriptor) protore
switch fd.FullName() {
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: onsonr.sonr.did.module.v1.Module"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: did.module.v1.Module"))
}
panic(fmt.Errorf("message onsonr.sonr.did.module.v1.Module does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message did.module.v1.Module does not contain field %s", fd.FullName()))
}
}
@ -189,9 +189,9 @@ func (x *fastReflection_Module) NewField(fd protoreflect.FieldDescriptor) protor
switch fd.FullName() {
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: onsonr.sonr.did.module.v1.Module"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: did.module.v1.Module"))
}
panic(fmt.Errorf("message onsonr.sonr.did.module.v1.Module does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message did.module.v1.Module does not contain field %s", fd.FullName()))
}
}
@ -201,7 +201,7 @@ func (x *fastReflection_Module) NewField(fd protoreflect.FieldDescriptor) protor
func (x *fastReflection_Module) WhichOneof(d protoreflect.OneofDescriptor) protoreflect.FieldDescriptor {
switch d.FullName() {
default:
panic(fmt.Errorf("%s is not a oneof field in onsonr.sonr.did.module.v1.Module", d.FullName()))
panic(fmt.Errorf("%s is not a oneof field in did.module.v1.Module", d.FullName()))
}
panic("unreachable")
}
@ -414,29 +414,24 @@ var File_did_module_v1_module_proto protoreflect.FileDescriptor
var file_did_module_v1_module_proto_rawDesc = []byte{
0x0a, 0x1a, 0x64, 0x69, 0x64, 0x2f, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f,
0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x6f, 0x6e,
0x73, 0x6f, 0x6e, 0x72, 0x2e, 0x73, 0x6f, 0x6e, 0x72, 0x2e, 0x64, 0x69, 0x64, 0x2e, 0x6d, 0x6f,
0x64, 0x75, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x20, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2f,
0x61, 0x70, 0x70, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x6d, 0x6f, 0x64,
0x75, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x2e, 0x0a, 0x06, 0x4d, 0x6f, 0x64,
0x75, 0x6c, 0x65, 0x3a, 0x24, 0xba, 0xc0, 0x96, 0xda, 0x01, 0x1e, 0x0a, 0x1c, 0x67, 0x69, 0x74,
0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6f, 0x6e, 0x73, 0x6f, 0x6e, 0x72, 0x2f, 0x73,
0x6f, 0x6e, 0x72, 0x2f, 0x78, 0x2f, 0x64, 0x69, 0x64, 0x42, 0xe8, 0x01, 0x0a, 0x1d, 0x63, 0x6f,
0x6d, 0x2e, 0x6f, 0x6e, 0x73, 0x6f, 0x6e, 0x72, 0x2e, 0x73, 0x6f, 0x6e, 0x72, 0x2e, 0x64, 0x69,
0x64, 0x2e, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x4d, 0x6f, 0x64,
0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68,
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6f, 0x6e, 0x73, 0x6f, 0x6e, 0x72, 0x2f, 0x73, 0x6f,
0x6e, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x64, 0x69, 0x64, 0x2f, 0x6d, 0x6f, 0x64, 0x75, 0x6c,
0x65, 0x2f, 0x76, 0x31, 0x3b, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x76, 0x31, 0xa2, 0x02, 0x04,
0x4f, 0x53, 0x44, 0x4d, 0xaa, 0x02, 0x19, 0x4f, 0x6e, 0x73, 0x6f, 0x6e, 0x72, 0x2e, 0x53, 0x6f,
0x6e, 0x72, 0x2e, 0x44, 0x69, 0x64, 0x2e, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x56, 0x31,
0xca, 0x02, 0x19, 0x4f, 0x6e, 0x73, 0x6f, 0x6e, 0x72, 0x5c, 0x53, 0x6f, 0x6e, 0x72, 0x5c, 0x44,
0x69, 0x64, 0x5c, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x25, 0x4f,
0x6e, 0x73, 0x6f, 0x6e, 0x72, 0x5c, 0x53, 0x6f, 0x6e, 0x72, 0x5c, 0x44, 0x69, 0x64, 0x5c, 0x4d,
0x6f, 0x64, 0x75, 0x6c, 0x65, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61,
0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x1d, 0x4f, 0x6e, 0x73, 0x6f, 0x6e, 0x72, 0x3a, 0x3a, 0x53,
0x6f, 0x6e, 0x72, 0x3a, 0x3a, 0x44, 0x69, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65,
0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x64, 0x69,
0x64, 0x2e, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x20, 0x63, 0x6f, 0x73,
0x6d, 0x6f, 0x73, 0x2f, 0x61, 0x70, 0x70, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31,
0x2f, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x28, 0x0a,
0x06, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x3a, 0x1e, 0xba, 0xc0, 0x96, 0xda, 0x01, 0x18, 0x0a,
0x16, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6f, 0x6e, 0x73, 0x6f,
0x6e, 0x72, 0x2f, 0x73, 0x6f, 0x6e, 0x72, 0x42, 0xa9, 0x01, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x2e,
0x64, 0x69, 0x64, 0x2e, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x4d,
0x6f, 0x64, 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x69,
0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6f, 0x6e, 0x73, 0x6f, 0x6e, 0x72, 0x2f,
0x73, 0x6f, 0x6e, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x64, 0x69, 0x64, 0x2f, 0x6d, 0x6f, 0x64,
0x75, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x76, 0x31, 0xa2,
0x02, 0x03, 0x44, 0x4d, 0x58, 0xaa, 0x02, 0x0d, 0x44, 0x69, 0x64, 0x2e, 0x4d, 0x6f, 0x64, 0x75,
0x6c, 0x65, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x0d, 0x44, 0x69, 0x64, 0x5c, 0x4d, 0x6f, 0x64, 0x75,
0x6c, 0x65, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x19, 0x44, 0x69, 0x64, 0x5c, 0x4d, 0x6f, 0x64, 0x75,
0x6c, 0x65, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
0x61, 0xea, 0x02, 0x0f, 0x44, 0x69, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x3a,
0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@ -453,7 +448,7 @@ func file_did_module_v1_module_proto_rawDescGZIP() []byte {
var file_did_module_v1_module_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_did_module_v1_module_proto_goTypes = []interface{}{
(*Module)(nil), // 0: onsonr.sonr.did.module.v1.Module
(*Module)(nil), // 0: did.module.v1.Module
}
var file_did_module_v1_module_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for method output_type

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -21,7 +21,6 @@ const _ = grpc.SupportPackageIsVersion9
const (
Query_Params_FullMethodName = "/did.v1.Query/Params"
Query_Resolve_FullMethodName = "/did.v1.Query/Resolve"
Query_Sign_FullMethodName = "/did.v1.Query/Sign"
Query_Verify_FullMethodName = "/did.v1.Query/Verify"
)
@ -35,8 +34,6 @@ type QueryClient interface {
Params(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error)
// Resolve queries the DID document by its id.
Resolve(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (*QueryResolveResponse, error)
// Sign signs a message with the DID document
Sign(ctx context.Context, in *QuerySignRequest, opts ...grpc.CallOption) (*QuerySignResponse, error)
// Verify verifies a message with the DID document
Verify(ctx context.Context, in *QueryVerifyRequest, opts ...grpc.CallOption) (*QueryVerifyResponse, error)
}
@ -69,16 +66,6 @@ func (c *queryClient) Resolve(ctx context.Context, in *QueryRequest, opts ...grp
return out, nil
}
func (c *queryClient) Sign(ctx context.Context, in *QuerySignRequest, opts ...grpc.CallOption) (*QuerySignResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(QuerySignResponse)
err := c.cc.Invoke(ctx, Query_Sign_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *queryClient) Verify(ctx context.Context, in *QueryVerifyRequest, opts ...grpc.CallOption) (*QueryVerifyResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(QueryVerifyResponse)
@ -99,8 +86,6 @@ type QueryServer interface {
Params(context.Context, *QueryRequest) (*QueryParamsResponse, error)
// Resolve queries the DID document by its id.
Resolve(context.Context, *QueryRequest) (*QueryResolveResponse, error)
// Sign signs a message with the DID document
Sign(context.Context, *QuerySignRequest) (*QuerySignResponse, error)
// Verify verifies a message with the DID document
Verify(context.Context, *QueryVerifyRequest) (*QueryVerifyResponse, error)
mustEmbedUnimplementedQueryServer()
@ -119,9 +104,6 @@ func (UnimplementedQueryServer) Params(context.Context, *QueryRequest) (*QueryPa
func (UnimplementedQueryServer) Resolve(context.Context, *QueryRequest) (*QueryResolveResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Resolve not implemented")
}
func (UnimplementedQueryServer) Sign(context.Context, *QuerySignRequest) (*QuerySignResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Sign not implemented")
}
func (UnimplementedQueryServer) Verify(context.Context, *QueryVerifyRequest) (*QueryVerifyResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Verify not implemented")
}
@ -182,24 +164,6 @@ func _Query_Resolve_Handler(srv interface{}, ctx context.Context, dec func(inter
return interceptor(ctx, in, info, handler)
}
func _Query_Sign_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(QuerySignRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(QueryServer).Sign(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Query_Sign_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(QueryServer).Sign(ctx, req.(*QuerySignRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Query_Verify_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(QueryVerifyRequest)
if err := dec(in); err != nil {
@ -233,10 +197,6 @@ var Query_ServiceDesc = grpc.ServiceDesc{
MethodName: "Resolve",
Handler: _Query_Resolve_Handler,
},
{
MethodName: "Sign",
Handler: _Query_Sign_Handler,
},
{
MethodName: "Verify",
Handler: _Query_Verify_Handler,
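Removing the `Sign` RPC from `did.v1.Query` touches every generated artifact in lockstep: the full-method-name constant, the client method, the server interface and its unimplemented stub, the unary handler, and the entry in `Query_ServiceDesc`.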

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -18,8 +18,8 @@ var (
)
func init() {
file_vault_module_v1_module_proto_init()
md_Module = File_vault_module_v1_module_proto.Messages().ByName("Module")
file_dwn_module_v1_module_proto_init()
md_Module = File_dwn_module_v1_module_proto.Messages().ByName("Module")
}
var _ protoreflect.Message = (*fastReflection_Module)(nil)
@ -31,7 +31,7 @@ func (x *Module) ProtoReflect() protoreflect.Message {
}
func (x *Module) slowProtoReflect() protoreflect.Message {
mi := &file_vault_module_v1_module_proto_msgTypes[0]
mi := &file_dwn_module_v1_module_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@ -104,9 +104,9 @@ func (x *fastReflection_Module) Has(fd protoreflect.FieldDescriptor) bool {
switch fd.FullName() {
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: onsonr.sonr.vault.module.v1.Module"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: dwn.module.v1.Module"))
}
panic(fmt.Errorf("message onsonr.sonr.vault.module.v1.Module does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message dwn.module.v1.Module does not contain field %s", fd.FullName()))
}
}
@ -120,9 +120,9 @@ func (x *fastReflection_Module) Clear(fd protoreflect.FieldDescriptor) {
switch fd.FullName() {
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: onsonr.sonr.vault.module.v1.Module"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: dwn.module.v1.Module"))
}
panic(fmt.Errorf("message onsonr.sonr.vault.module.v1.Module does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message dwn.module.v1.Module does not contain field %s", fd.FullName()))
}
}
@ -136,9 +136,9 @@ func (x *fastReflection_Module) Get(descriptor protoreflect.FieldDescriptor) pro
switch descriptor.FullName() {
default:
if descriptor.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: onsonr.sonr.vault.module.v1.Module"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: dwn.module.v1.Module"))
}
panic(fmt.Errorf("message onsonr.sonr.vault.module.v1.Module does not contain field %s", descriptor.FullName()))
panic(fmt.Errorf("message dwn.module.v1.Module does not contain field %s", descriptor.FullName()))
}
}
@ -156,9 +156,9 @@ func (x *fastReflection_Module) Set(fd protoreflect.FieldDescriptor, value proto
switch fd.FullName() {
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: onsonr.sonr.vault.module.v1.Module"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: dwn.module.v1.Module"))
}
panic(fmt.Errorf("message onsonr.sonr.vault.module.v1.Module does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message dwn.module.v1.Module does not contain field %s", fd.FullName()))
}
}
@ -176,9 +176,9 @@ func (x *fastReflection_Module) Mutable(fd protoreflect.FieldDescriptor) protore
switch fd.FullName() {
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: onsonr.sonr.vault.module.v1.Module"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: dwn.module.v1.Module"))
}
panic(fmt.Errorf("message onsonr.sonr.vault.module.v1.Module does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message dwn.module.v1.Module does not contain field %s", fd.FullName()))
}
}
@ -189,9 +189,9 @@ func (x *fastReflection_Module) NewField(fd protoreflect.FieldDescriptor) protor
switch fd.FullName() {
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: onsonr.sonr.vault.module.v1.Module"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: dwn.module.v1.Module"))
}
panic(fmt.Errorf("message onsonr.sonr.vault.module.v1.Module does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message dwn.module.v1.Module does not contain field %s", fd.FullName()))
}
}
@ -201,7 +201,7 @@ func (x *fastReflection_Module) NewField(fd protoreflect.FieldDescriptor) protor
func (x *fastReflection_Module) WhichOneof(d protoreflect.OneofDescriptor) protoreflect.FieldDescriptor {
switch d.FullName() {
default:
panic(fmt.Errorf("%s is not a oneof field in onsonr.sonr.vault.module.v1.Module", d.FullName()))
panic(fmt.Errorf("%s is not a oneof field in dwn.module.v1.Module", d.FullName()))
}
panic("unreachable")
}
@ -373,7 +373,7 @@ func (x *fastReflection_Module) ProtoMethods() *protoiface.Methods {
// versions:
// protoc-gen-go v1.27.0
// protoc (unknown)
// source: vault/module/v1/module.proto
// source: dwn/module/v1/module.proto
const (
// Verify that this generated code is sufficiently up-to-date.
@ -393,7 +393,7 @@ type Module struct {
func (x *Module) Reset() {
*x = Module{}
if protoimpl.UnsafeEnabled {
mi := &file_vault_module_v1_module_proto_msgTypes[0]
mi := &file_dwn_module_v1_module_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@ -407,56 +407,50 @@ func (*Module) ProtoMessage() {}
// Deprecated: Use Module.ProtoReflect.Descriptor instead.
func (*Module) Descriptor() ([]byte, []int) {
return file_vault_module_v1_module_proto_rawDescGZIP(), []int{0}
return file_dwn_module_v1_module_proto_rawDescGZIP(), []int{0}
}
var File_vault_module_v1_module_proto protoreflect.FileDescriptor
var File_dwn_module_v1_module_proto protoreflect.FileDescriptor
var file_vault_module_v1_module_proto_rawDesc = []byte{
0x0a, 0x1c, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2f, 0x76,
0x31, 0x2f, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1b,
0x6f, 0x6e, 0x73, 0x6f, 0x6e, 0x72, 0x2e, 0x73, 0x6f, 0x6e, 0x72, 0x2e, 0x76, 0x61, 0x75, 0x6c,
0x74, 0x2e, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x20, 0x63, 0x6f, 0x73,
var file_dwn_module_v1_module_proto_rawDesc = []byte{
0x0a, 0x1a, 0x64, 0x77, 0x6e, 0x2f, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f,
0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x64, 0x77,
0x6e, 0x2e, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x20, 0x63, 0x6f, 0x73,
0x6d, 0x6f, 0x73, 0x2f, 0x61, 0x70, 0x70, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31,
0x2f, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x30, 0x0a,
0x06, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x3a, 0x26, 0xba, 0xc0, 0x96, 0xda, 0x01, 0x20, 0x0a,
0x1e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6f, 0x6e, 0x73, 0x6f,
0x6e, 0x72, 0x2f, 0x73, 0x6f, 0x6e, 0x72, 0x2f, 0x78, 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x42,
0xf4, 0x01, 0x0a, 0x1f, 0x63, 0x6f, 0x6d, 0x2e, 0x6f, 0x6e, 0x73, 0x6f, 0x6e, 0x72, 0x2e, 0x73,
0x6f, 0x6e, 0x72, 0x2e, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65,
0x2e, 0x76, 0x31, 0x42, 0x0b, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f,
0x50, 0x01, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6f,
0x6e, 0x73, 0x6f, 0x6e, 0x72, 0x2f, 0x73, 0x6f, 0x6e, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76,
0x61, 0x75, 0x6c, 0x74, 0x2f, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x6d,
0x6f, 0x64, 0x75, 0x6c, 0x65, 0x76, 0x31, 0xa2, 0x02, 0x04, 0x4f, 0x53, 0x56, 0x4d, 0xaa, 0x02,
0x1b, 0x4f, 0x6e, 0x73, 0x6f, 0x6e, 0x72, 0x2e, 0x53, 0x6f, 0x6e, 0x72, 0x2e, 0x56, 0x61, 0x75,
0x6c, 0x74, 0x2e, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x1b, 0x4f,
0x6e, 0x73, 0x6f, 0x6e, 0x72, 0x5c, 0x53, 0x6f, 0x6e, 0x72, 0x5c, 0x56, 0x61, 0x75, 0x6c, 0x74,
0x5c, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x27, 0x4f, 0x6e, 0x73,
0x6f, 0x6e, 0x72, 0x5c, 0x53, 0x6f, 0x6e, 0x72, 0x5c, 0x56, 0x61, 0x75, 0x6c, 0x74, 0x5c, 0x4d,
0x6f, 0x64, 0x75, 0x6c, 0x65, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61,
0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x1f, 0x4f, 0x6e, 0x73, 0x6f, 0x6e, 0x72, 0x3a, 0x3a, 0x53,
0x6f, 0x6e, 0x72, 0x3a, 0x3a, 0x56, 0x61, 0x75, 0x6c, 0x74, 0x3a, 0x3a, 0x4d, 0x6f, 0x64, 0x75,
0x6c, 0x65, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
0x2f, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x28, 0x0a,
0x06, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x3a, 0x1e, 0xba, 0xc0, 0x96, 0xda, 0x01, 0x18, 0x0a,
0x16, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6f, 0x6e, 0x73, 0x6f,
0x6e, 0x72, 0x2f, 0x73, 0x6f, 0x6e, 0x72, 0x42, 0xa9, 0x01, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x2e,
0x64, 0x77, 0x6e, 0x2e, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x4d,
0x6f, 0x64, 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x69,
0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6f, 0x6e, 0x73, 0x6f, 0x6e, 0x72, 0x2f,
0x73, 0x6f, 0x6e, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x64, 0x77, 0x6e, 0x2f, 0x6d, 0x6f, 0x64,
0x75, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x76, 0x31, 0xa2,
0x02, 0x03, 0x44, 0x4d, 0x58, 0xaa, 0x02, 0x0d, 0x44, 0x77, 0x6e, 0x2e, 0x4d, 0x6f, 0x64, 0x75,
0x6c, 0x65, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x0d, 0x44, 0x77, 0x6e, 0x5c, 0x4d, 0x6f, 0x64, 0x75,
0x6c, 0x65, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x19, 0x44, 0x77, 0x6e, 0x5c, 0x4d, 0x6f, 0x64, 0x75,
0x6c, 0x65, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
0x61, 0xea, 0x02, 0x0f, 0x44, 0x77, 0x6e, 0x3a, 0x3a, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x3a,
0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_vault_module_v1_module_proto_rawDescOnce sync.Once
file_vault_module_v1_module_proto_rawDescData = file_vault_module_v1_module_proto_rawDesc
file_dwn_module_v1_module_proto_rawDescOnce sync.Once
file_dwn_module_v1_module_proto_rawDescData = file_dwn_module_v1_module_proto_rawDesc
)
func file_vault_module_v1_module_proto_rawDescGZIP() []byte {
file_vault_module_v1_module_proto_rawDescOnce.Do(func() {
file_vault_module_v1_module_proto_rawDescData = protoimpl.X.CompressGZIP(file_vault_module_v1_module_proto_rawDescData)
func file_dwn_module_v1_module_proto_rawDescGZIP() []byte {
file_dwn_module_v1_module_proto_rawDescOnce.Do(func() {
file_dwn_module_v1_module_proto_rawDescData = protoimpl.X.CompressGZIP(file_dwn_module_v1_module_proto_rawDescData)
})
return file_vault_module_v1_module_proto_rawDescData
return file_dwn_module_v1_module_proto_rawDescData
}
var file_vault_module_v1_module_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_vault_module_v1_module_proto_goTypes = []interface{}{
(*Module)(nil), // 0: onsonr.sonr.vault.module.v1.Module
var file_dwn_module_v1_module_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_dwn_module_v1_module_proto_goTypes = []interface{}{
(*Module)(nil), // 0: dwn.module.v1.Module
}
var file_vault_module_v1_module_proto_depIdxs = []int32{
var file_dwn_module_v1_module_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for method output_type
0, // [0:0] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
@ -464,13 +458,13 @@ var file_vault_module_v1_module_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for field type_name
}
func init() { file_vault_module_v1_module_proto_init() }
func file_vault_module_v1_module_proto_init() {
if File_vault_module_v1_module_proto != nil {
func init() { file_dwn_module_v1_module_proto_init() }
func file_dwn_module_v1_module_proto_init() {
if File_dwn_module_v1_module_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_vault_module_v1_module_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
file_dwn_module_v1_module_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Module); i {
case 0:
return &v.state
@ -487,18 +481,18 @@ func file_vault_module_v1_module_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_vault_module_v1_module_proto_rawDesc,
RawDescriptor: file_dwn_module_v1_module_proto_rawDesc,
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_vault_module_v1_module_proto_goTypes,
DependencyIndexes: file_vault_module_v1_module_proto_depIdxs,
MessageInfos: file_vault_module_v1_module_proto_msgTypes,
GoTypes: file_dwn_module_v1_module_proto_goTypes,
DependencyIndexes: file_dwn_module_v1_module_proto_depIdxs,
MessageInfos: file_dwn_module_v1_module_proto_msgTypes,
}.Build()
File_vault_module_v1_module_proto = out.File
file_vault_module_v1_module_proto_rawDesc = nil
file_vault_module_v1_module_proto_goTypes = nil
file_vault_module_v1_module_proto_depIdxs = nil
File_dwn_module_v1_module_proto = out.File
file_dwn_module_v1_module_proto_rawDesc = nil
file_dwn_module_v1_module_proto_goTypes = nil
file_dwn_module_v1_module_proto_depIdxs = nil
}

api/dwn/v1/genesis.pulsar.go (new file, 4879 lines)

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -2,9 +2,9 @@
// versions:
// - protoc-gen-go-grpc v1.5.1
// - protoc (unknown)
// source: vault/v1/query.proto
// source: dwn/v1/query.proto
package vaultv1
package dwnv1
import (
context "context"
@ -19,10 +19,10 @@ import (
const _ = grpc.SupportPackageIsVersion9
const (
Query_Params_FullMethodName = "/vault.v1.Query/Params"
Query_Schema_FullMethodName = "/vault.v1.Query/Schema"
Query_Allocate_FullMethodName = "/vault.v1.Query/Allocate"
Query_Sync_FullMethodName = "/vault.v1.Query/Sync"
Query_Params_FullMethodName = "/dwn.v1.Query/Params"
Query_Schema_FullMethodName = "/dwn.v1.Query/Schema"
Query_Allocate_FullMethodName = "/dwn.v1.Query/Allocate"
Query_Sync_FullMethodName = "/dwn.v1.Query/Sync"
)
// QueryClient is the client API for Query service.
@ -228,7 +228,7 @@ func _Query_Sync_Handler(srv interface{}, ctx context.Context, dec func(interfac
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var Query_ServiceDesc = grpc.ServiceDesc{
ServiceName: "vault.v1.Query",
ServiceName: "dwn.v1.Query",
HandlerType: (*QueryServer)(nil),
Methods: []grpc.MethodDesc{
{
@ -249,5 +249,5 @@ var Query_ServiceDesc = grpc.ServiceDesc{
},
},
Streams: []grpc.StreamDesc{},
Metadata: "vault/v1/query.proto",
Metadata: "dwn/v1/query.proto",
}
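Note that renaming the service from `vault.v1.Query` to `dwn.v1.Query` changes the wire-level method paths, e.g. `/vault.v1.Query/Params` becomes `/dwn.v1.Query/Params`, so any client pinned to the old paths must regenerate its stubs.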


@ -0,0 +1,292 @@
// Code generated by protoc-gen-go-cosmos-orm. DO NOT EDIT.
package dwnv1
import (
context "context"
ormlist "cosmossdk.io/orm/model/ormlist"
ormtable "cosmossdk.io/orm/model/ormtable"
ormerrors "cosmossdk.io/orm/types/ormerrors"
)
type CredentialTable interface {
Insert(ctx context.Context, credential *Credential) error
Update(ctx context.Context, credential *Credential) error
Save(ctx context.Context, credential *Credential) error
Delete(ctx context.Context, credential *Credential) error
Has(ctx context.Context, id []byte) (found bool, err error)
// Get returns nil and an error which responds true to ormerrors.IsNotFound() if the record was not found.
Get(ctx context.Context, id []byte) (*Credential, error)
List(ctx context.Context, prefixKey CredentialIndexKey, opts ...ormlist.Option) (CredentialIterator, error)
ListRange(ctx context.Context, from, to CredentialIndexKey, opts ...ormlist.Option) (CredentialIterator, error)
DeleteBy(ctx context.Context, prefixKey CredentialIndexKey) error
DeleteRange(ctx context.Context, from, to CredentialIndexKey) error
doNotImplement()
}
type CredentialIterator struct {
ormtable.Iterator
}
func (i CredentialIterator) Value() (*Credential, error) {
var credential Credential
err := i.UnmarshalMessage(&credential)
return &credential, err
}
type CredentialIndexKey interface {
id() uint32
values() []interface{}
credentialIndexKey()
}
// primary key starting index..
type CredentialPrimaryKey = CredentialIdIndexKey
type CredentialIdIndexKey struct {
vs []interface{}
}
func (x CredentialIdIndexKey) id() uint32 { return 0 }
func (x CredentialIdIndexKey) values() []interface{} { return x.vs }
func (x CredentialIdIndexKey) credentialIndexKey() {}
func (this CredentialIdIndexKey) WithId(id []byte) CredentialIdIndexKey {
this.vs = []interface{}{id}
return this
}
type credentialTable struct {
table ormtable.Table
}
func (this credentialTable) Insert(ctx context.Context, credential *Credential) error {
return this.table.Insert(ctx, credential)
}
func (this credentialTable) Update(ctx context.Context, credential *Credential) error {
return this.table.Update(ctx, credential)
}
func (this credentialTable) Save(ctx context.Context, credential *Credential) error {
return this.table.Save(ctx, credential)
}
func (this credentialTable) Delete(ctx context.Context, credential *Credential) error {
return this.table.Delete(ctx, credential)
}
func (this credentialTable) Has(ctx context.Context, id []byte) (found bool, err error) {
return this.table.PrimaryKey().Has(ctx, id)
}
func (this credentialTable) Get(ctx context.Context, id []byte) (*Credential, error) {
var credential Credential
found, err := this.table.PrimaryKey().Get(ctx, &credential, id)
if err != nil {
return nil, err
}
if !found {
return nil, ormerrors.NotFound
}
return &credential, nil
}
func (this credentialTable) List(ctx context.Context, prefixKey CredentialIndexKey, opts ...ormlist.Option) (CredentialIterator, error) {
it, err := this.table.GetIndexByID(prefixKey.id()).List(ctx, prefixKey.values(), opts...)
return CredentialIterator{it}, err
}
func (this credentialTable) ListRange(ctx context.Context, from, to CredentialIndexKey, opts ...ormlist.Option) (CredentialIterator, error) {
it, err := this.table.GetIndexByID(from.id()).ListRange(ctx, from.values(), to.values(), opts...)
return CredentialIterator{it}, err
}
func (this credentialTable) DeleteBy(ctx context.Context, prefixKey CredentialIndexKey) error {
return this.table.GetIndexByID(prefixKey.id()).DeleteBy(ctx, prefixKey.values()...)
}
func (this credentialTable) DeleteRange(ctx context.Context, from, to CredentialIndexKey) error {
return this.table.GetIndexByID(from.id()).DeleteRange(ctx, from.values(), to.values())
}
func (this credentialTable) doNotImplement() {}
var _ CredentialTable = credentialTable{}
func NewCredentialTable(db ormtable.Schema) (CredentialTable, error) {
table := db.GetTable(&Credential{})
if table == nil {
return nil, ormerrors.TableNotFound.Wrap(string((&Credential{}).ProtoReflect().Descriptor().FullName()))
}
return credentialTable{table}, nil
}
type ProfileTable interface {
Insert(ctx context.Context, profile *Profile) error
Update(ctx context.Context, profile *Profile) error
Save(ctx context.Context, profile *Profile) error
Delete(ctx context.Context, profile *Profile) error
Has(ctx context.Context, account []byte) (found bool, err error)
// Get returns nil and an error which responds true to ormerrors.IsNotFound() if the record was not found.
Get(ctx context.Context, account []byte) (*Profile, error)
List(ctx context.Context, prefixKey ProfileIndexKey, opts ...ormlist.Option) (ProfileIterator, error)
ListRange(ctx context.Context, from, to ProfileIndexKey, opts ...ormlist.Option) (ProfileIterator, error)
DeleteBy(ctx context.Context, prefixKey ProfileIndexKey) error
DeleteRange(ctx context.Context, from, to ProfileIndexKey) error
doNotImplement()
}
type ProfileIterator struct {
ormtable.Iterator
}
func (i ProfileIterator) Value() (*Profile, error) {
var profile Profile
err := i.UnmarshalMessage(&profile)
return &profile, err
}
type ProfileIndexKey interface {
id() uint32
values() []interface{}
profileIndexKey()
}
// primary key starting index..
type ProfilePrimaryKey = ProfileAccountIndexKey
type ProfileAccountIndexKey struct {
vs []interface{}
}
func (x ProfileAccountIndexKey) id() uint32 { return 0 }
func (x ProfileAccountIndexKey) values() []interface{} { return x.vs }
func (x ProfileAccountIndexKey) profileIndexKey() {}
func (this ProfileAccountIndexKey) WithAccount(account []byte) ProfileAccountIndexKey {
this.vs = []interface{}{account}
return this
}
type ProfileAmountIndexKey struct {
vs []interface{}
}
func (x ProfileAmountIndexKey) id() uint32 { return 1 }
func (x ProfileAmountIndexKey) values() []interface{} { return x.vs }
func (x ProfileAmountIndexKey) profileIndexKey() {}
func (this ProfileAmountIndexKey) WithAmount(amount uint64) ProfileAmountIndexKey {
this.vs = []interface{}{amount}
return this
}
type profileTable struct {
table ormtable.Table
}
func (this profileTable) Insert(ctx context.Context, profile *Profile) error {
return this.table.Insert(ctx, profile)
}
func (this profileTable) Update(ctx context.Context, profile *Profile) error {
return this.table.Update(ctx, profile)
}
func (this profileTable) Save(ctx context.Context, profile *Profile) error {
return this.table.Save(ctx, profile)
}
func (this profileTable) Delete(ctx context.Context, profile *Profile) error {
return this.table.Delete(ctx, profile)
}
func (this profileTable) Has(ctx context.Context, account []byte) (found bool, err error) {
return this.table.PrimaryKey().Has(ctx, account)
}
func (this profileTable) Get(ctx context.Context, account []byte) (*Profile, error) {
var profile Profile
found, err := this.table.PrimaryKey().Get(ctx, &profile, account)
if err != nil {
return nil, err
}
if !found {
return nil, ormerrors.NotFound
}
return &profile, nil
}
func (this profileTable) List(ctx context.Context, prefixKey ProfileIndexKey, opts ...ormlist.Option) (ProfileIterator, error) {
it, err := this.table.GetIndexByID(prefixKey.id()).List(ctx, prefixKey.values(), opts...)
return ProfileIterator{it}, err
}
func (this profileTable) ListRange(ctx context.Context, from, to ProfileIndexKey, opts ...ormlist.Option) (ProfileIterator, error) {
it, err := this.table.GetIndexByID(from.id()).ListRange(ctx, from.values(), to.values(), opts...)
return ProfileIterator{it}, err
}
func (this profileTable) DeleteBy(ctx context.Context, prefixKey ProfileIndexKey) error {
return this.table.GetIndexByID(prefixKey.id()).DeleteBy(ctx, prefixKey.values()...)
}
func (this profileTable) DeleteRange(ctx context.Context, from, to ProfileIndexKey) error {
return this.table.GetIndexByID(from.id()).DeleteRange(ctx, from.values(), to.values())
}
func (this profileTable) doNotImplement() {}
var _ ProfileTable = profileTable{}
func NewProfileTable(db ormtable.Schema) (ProfileTable, error) {
table := db.GetTable(&Profile{})
if table == nil {
return nil, ormerrors.TableNotFound.Wrap(string((&Profile{}).ProtoReflect().Descriptor().FullName()))
}
return profileTable{table}, nil
}
type StateStore interface {
CredentialTable() CredentialTable
ProfileTable() ProfileTable
doNotImplement()
}
type stateStore struct {
credential CredentialTable
profile ProfileTable
}
func (x stateStore) CredentialTable() CredentialTable {
return x.credential
}
func (x stateStore) ProfileTable() ProfileTable {
return x.profile
}
func (stateStore) doNotImplement() {}
var _ StateStore = stateStore{}
func NewStateStore(db ormtable.Schema) (StateStore, error) {
credentialTable, err := NewCredentialTable(db)
if err != nil {
return nil, err
}
profileTable, err := NewProfileTable(db)
if err != nil {
return nil, err
}
return stateStore{
credentialTable,
profileTable,
}, nil
}
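The generated accessors above follow the usual protoc-gen-go-cosmos-orm shape; a minimal usage sketch (assuming an `ormtable.Schema` is wired up elsewhere, e.g. via the module's `ormdb` setup, and assuming this package imports as `github.com/onsonr/sonr/api/dwn/v1`; both are inferences, not shown in this diff) might look like:

```go
package keeper // illustrative placement, not part of this diff

import (
	"context"

	ormtable "cosmossdk.io/orm/model/ormtable"

	dwnv1 "github.com/onsonr/sonr/api/dwn/v1"
)

// lookupCredential sketches reading a record through the generated StateStore.
func lookupCredential(ctx context.Context, db ormtable.Schema, id []byte) (*dwnv1.Credential, error) {
	store, err := dwnv1.NewStateStore(db)
	if err != nil {
		return nil, err
	}
	// Get returns an error satisfying ormerrors.IsNotFound when no row matches.
	return store.CredentialTable().Get(ctx, id)
}
```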

api/dwn/v1/state.pulsar.go (new file, 1515 lines)

File diff suppressed because it is too large

api/dwn/v1/tx.pulsar.go (new file, 2061 lines)

File diff suppressed because it is too large


@ -2,9 +2,9 @@
// versions:
// - protoc-gen-go-grpc v1.5.1
// - protoc (unknown)
// source: vault/v1/tx.proto
// source: dwn/v1/tx.proto
package vaultv1
package dwnv1
import (
context "context"
@ -19,7 +19,8 @@ import (
const _ = grpc.SupportPackageIsVersion9
const (
Msg_UpdateParams_FullMethodName = "/vault.v1.Msg/UpdateParams"
Msg_UpdateParams_FullMethodName = "/dwn.v1.Msg/UpdateParams"
Msg_Initialize_FullMethodName = "/dwn.v1.Msg/Initialize"
)
// MsgClient is the client API for Msg service.
@ -32,6 +33,8 @@ type MsgClient interface {
//
// Since: cosmos-sdk 0.47
UpdateParams(ctx context.Context, in *MsgUpdateParams, opts ...grpc.CallOption) (*MsgUpdateParamsResponse, error)
// Initialize spawns a new Vault
Initialize(ctx context.Context, in *MsgInitialize, opts ...grpc.CallOption) (*MsgInitializeResponse, error)
}
type msgClient struct {
@ -52,6 +55,16 @@ func (c *msgClient) UpdateParams(ctx context.Context, in *MsgUpdateParams, opts
return out, nil
}
func (c *msgClient) Initialize(ctx context.Context, in *MsgInitialize, opts ...grpc.CallOption) (*MsgInitializeResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(MsgInitializeResponse)
err := c.cc.Invoke(ctx, Msg_Initialize_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
// MsgServer is the server API for Msg service.
// All implementations must embed UnimplementedMsgServer
// for forward compatibility.
@ -62,6 +75,8 @@ type MsgServer interface {
//
// Since: cosmos-sdk 0.47
UpdateParams(context.Context, *MsgUpdateParams) (*MsgUpdateParamsResponse, error)
// Initialize spawns a new Vault
Initialize(context.Context, *MsgInitialize) (*MsgInitializeResponse, error)
mustEmbedUnimplementedMsgServer()
}
@ -75,6 +90,9 @@ type UnimplementedMsgServer struct{}
func (UnimplementedMsgServer) UpdateParams(context.Context, *MsgUpdateParams) (*MsgUpdateParamsResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method UpdateParams not implemented")
}
func (UnimplementedMsgServer) Initialize(context.Context, *MsgInitialize) (*MsgInitializeResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Initialize not implemented")
}
func (UnimplementedMsgServer) mustEmbedUnimplementedMsgServer() {}
func (UnimplementedMsgServer) testEmbeddedByValue() {}
@ -114,18 +132,40 @@ func _Msg_UpdateParams_Handler(srv interface{}, ctx context.Context, dec func(in
return interceptor(ctx, in, info, handler)
}
func _Msg_Initialize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(MsgInitialize)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(MsgServer).Initialize(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Msg_Initialize_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(MsgServer).Initialize(ctx, req.(*MsgInitialize))
}
return interceptor(ctx, in, info, handler)
}
// Msg_ServiceDesc is the grpc.ServiceDesc for Msg service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var Msg_ServiceDesc = grpc.ServiceDesc{
ServiceName: "vault.v1.Msg",
ServiceName: "dwn.v1.Msg",
HandlerType: (*MsgServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "UpdateParams",
Handler: _Msg_UpdateParams_Handler,
},
{
MethodName: "Initialize",
Handler: _Msg_Initialize_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "vault/v1/tx.proto",
Metadata: "dwn/v1/tx.proto",
}
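For a server to expose the new RPC, it embeds `UnimplementedMsgServer` and overrides `Initialize`. A sketch follows; the keeper wiring and the `RegisterMsgServer` helper are assumed from the standard protoc-gen-go-grpc output and are not shown in this diff:

```go
package keeper // illustrative placement, not part of this diff

import (
	"context"

	grpc "google.golang.org/grpc"

	dwnv1 "github.com/onsonr/sonr/api/dwn/v1"
)

type msgServer struct {
	dwnv1.UnimplementedMsgServer
}

// Initialize spawns a new Vault; the body here is a placeholder.
func (msgServer) Initialize(ctx context.Context, msg *dwnv1.MsgInitialize) (*dwnv1.MsgInitializeResponse, error) {
	return &dwnv1.MsgInitializeResponse{}, nil
}

// registerMsg assumes the standard generated RegisterMsgServer helper.
func registerMsg(s *grpc.Server) {
	dwnv1.RegisterMsgServer(s, msgServer{})
}
```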


@ -18,8 +18,8 @@ var (
)
func init() {
file_service_module_v1_module_proto_init()
md_Module = File_service_module_v1_module_proto.Messages().ByName("Module")
file_svc_module_v1_module_proto_init()
md_Module = File_svc_module_v1_module_proto.Messages().ByName("Module")
}
var _ protoreflect.Message = (*fastReflection_Module)(nil)
@ -31,7 +31,7 @@ func (x *Module) ProtoReflect() protoreflect.Message {
}
func (x *Module) slowProtoReflect() protoreflect.Message {
mi := &file_service_module_v1_module_proto_msgTypes[0]
mi := &file_svc_module_v1_module_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@ -104,9 +104,9 @@ func (x *fastReflection_Module) Has(fd protoreflect.FieldDescriptor) bool {
switch fd.FullName() {
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: onsonr.sonr.service.module.v1.Module"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: svc.module.v1.Module"))
}
panic(fmt.Errorf("message onsonr.sonr.service.module.v1.Module does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message svc.module.v1.Module does not contain field %s", fd.FullName()))
}
}
@ -120,9 +120,9 @@ func (x *fastReflection_Module) Clear(fd protoreflect.FieldDescriptor) {
switch fd.FullName() {
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: onsonr.sonr.service.module.v1.Module"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: svc.module.v1.Module"))
}
panic(fmt.Errorf("message onsonr.sonr.service.module.v1.Module does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message svc.module.v1.Module does not contain field %s", fd.FullName()))
}
}
@ -136,9 +136,9 @@ func (x *fastReflection_Module) Get(descriptor protoreflect.FieldDescriptor) pro
switch descriptor.FullName() {
default:
if descriptor.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: onsonr.sonr.service.module.v1.Module"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: svc.module.v1.Module"))
}
panic(fmt.Errorf("message onsonr.sonr.service.module.v1.Module does not contain field %s", descriptor.FullName()))
panic(fmt.Errorf("message svc.module.v1.Module does not contain field %s", descriptor.FullName()))
}
}
@ -156,9 +156,9 @@ func (x *fastReflection_Module) Set(fd protoreflect.FieldDescriptor, value proto
switch fd.FullName() {
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: onsonr.sonr.service.module.v1.Module"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: svc.module.v1.Module"))
}
panic(fmt.Errorf("message onsonr.sonr.service.module.v1.Module does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message svc.module.v1.Module does not contain field %s", fd.FullName()))
}
}
@ -176,9 +176,9 @@ func (x *fastReflection_Module) Mutable(fd protoreflect.FieldDescriptor) protore
switch fd.FullName() {
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: onsonr.sonr.service.module.v1.Module"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: svc.module.v1.Module"))
}
panic(fmt.Errorf("message onsonr.sonr.service.module.v1.Module does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message svc.module.v1.Module does not contain field %s", fd.FullName()))
}
}
@ -189,9 +189,9 @@ func (x *fastReflection_Module) NewField(fd protoreflect.FieldDescriptor) protor
switch fd.FullName() {
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: onsonr.sonr.service.module.v1.Module"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: svc.module.v1.Module"))
}
panic(fmt.Errorf("message onsonr.sonr.service.module.v1.Module does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message svc.module.v1.Module does not contain field %s", fd.FullName()))
}
}
@ -201,7 +201,7 @@ func (x *fastReflection_Module) NewField(fd protoreflect.FieldDescriptor) protor
func (x *fastReflection_Module) WhichOneof(d protoreflect.OneofDescriptor) protoreflect.FieldDescriptor {
switch d.FullName() {
default:
panic(fmt.Errorf("%s is not a oneof field in onsonr.sonr.service.module.v1.Module", d.FullName()))
panic(fmt.Errorf("%s is not a oneof field in svc.module.v1.Module", d.FullName()))
}
panic("unreachable")
}
@ -373,7 +373,7 @@ func (x *fastReflection_Module) ProtoMethods() *protoiface.Methods {
// versions:
// protoc-gen-go v1.27.0
// protoc (unknown)
// source: service/module/v1/module.proto
// source: svc/module/v1/module.proto
const (
// Verify that this generated code is sufficiently up-to-date.
@ -393,7 +393,7 @@ type Module struct {
func (x *Module) Reset() {
*x = Module{}
if protoimpl.UnsafeEnabled {
mi := &file_service_module_v1_module_proto_msgTypes[0]
mi := &file_svc_module_v1_module_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@ -407,57 +407,50 @@ func (*Module) ProtoMessage() {}
// Deprecated: Use Module.ProtoReflect.Descriptor instead.
func (*Module) Descriptor() ([]byte, []int) {
return file_service_module_v1_module_proto_rawDescGZIP(), []int{0}
return file_svc_module_v1_module_proto_rawDescGZIP(), []int{0}
}
var File_service_module_v1_module_proto protoreflect.FileDescriptor
var File_svc_module_v1_module_proto protoreflect.FileDescriptor
var file_service_module_v1_module_proto_rawDesc = []byte{
0x0a, 0x1e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65,
0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x12, 0x1d, 0x6f, 0x6e, 0x73, 0x6f, 0x6e, 0x72, 0x2e, 0x73, 0x6f, 0x6e, 0x72, 0x2e, 0x73, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a,
0x20, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2f, 0x61, 0x70, 0x70, 0x2f, 0x76, 0x31, 0x61, 0x6c,
0x70, 0x68, 0x61, 0x31, 0x2f, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x22, 0x32, 0x0a, 0x06, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x3a, 0x28, 0xba, 0xc0, 0x96,
0xda, 0x01, 0x22, 0x0a, 0x20, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
0x6f, 0x6e, 0x73, 0x6f, 0x6e, 0x72, 0x2f, 0x73, 0x6f, 0x6e, 0x72, 0x2f, 0x78, 0x2f, 0x73, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x42, 0x80, 0x02, 0x0a, 0x21, 0x63, 0x6f, 0x6d, 0x2e, 0x6f, 0x6e,
0x73, 0x6f, 0x6e, 0x72, 0x2e, 0x73, 0x6f, 0x6e, 0x72, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
0x65, 0x2e, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x4d, 0x6f, 0x64,
0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68,
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6f, 0x6e, 0x73, 0x6f, 0x6e, 0x72, 0x2f, 0x73, 0x6f,
0x6e, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x6d,
0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x76,
0x31, 0xa2, 0x02, 0x04, 0x4f, 0x53, 0x53, 0x4d, 0xaa, 0x02, 0x1d, 0x4f, 0x6e, 0x73, 0x6f, 0x6e,
0x72, 0x2e, 0x53, 0x6f, 0x6e, 0x72, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4d,
0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x1d, 0x4f, 0x6e, 0x73, 0x6f, 0x6e,
0x72, 0x5c, 0x53, 0x6f, 0x6e, 0x72, 0x5c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5c, 0x4d,
0x6f, 0x64, 0x75, 0x6c, 0x65, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x29, 0x4f, 0x6e, 0x73, 0x6f, 0x6e,
0x72, 0x5c, 0x53, 0x6f, 0x6e, 0x72, 0x5c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5c, 0x4d,
0x6f, 0x64, 0x75, 0x6c, 0x65, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61,
0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x21, 0x4f, 0x6e, 0x73, 0x6f, 0x6e, 0x72, 0x3a, 0x3a, 0x53,
0x6f, 0x6e, 0x72, 0x3a, 0x3a, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x3a, 0x3a, 0x4d, 0x6f,
0x64, 0x75, 0x6c, 0x65, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
var file_svc_module_v1_module_proto_rawDesc = []byte{
0x0a, 0x1a, 0x73, 0x76, 0x63, 0x2f, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f,
0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x73, 0x76,
0x63, 0x2e, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x20, 0x63, 0x6f, 0x73,
0x6d, 0x6f, 0x73, 0x2f, 0x61, 0x70, 0x70, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31,
0x2f, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x28, 0x0a,
0x06, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x3a, 0x1e, 0xba, 0xc0, 0x96, 0xda, 0x01, 0x18, 0x0a,
0x16, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6f, 0x6e, 0x73, 0x6f,
0x6e, 0x72, 0x2f, 0x73, 0x6f, 0x6e, 0x72, 0x42, 0xa9, 0x01, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x2e,
0x73, 0x76, 0x63, 0x2e, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x4d,
0x6f, 0x64, 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x69,
0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6f, 0x6e, 0x73, 0x6f, 0x6e, 0x72, 0x2f,
0x73, 0x6f, 0x6e, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x76, 0x63, 0x2f, 0x6d, 0x6f, 0x64,
0x75, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x76, 0x31, 0xa2,
0x02, 0x03, 0x53, 0x4d, 0x58, 0xaa, 0x02, 0x0d, 0x53, 0x76, 0x63, 0x2e, 0x4d, 0x6f, 0x64, 0x75,
0x6c, 0x65, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x0d, 0x53, 0x76, 0x63, 0x5c, 0x4d, 0x6f, 0x64, 0x75,
0x6c, 0x65, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x19, 0x53, 0x76, 0x63, 0x5c, 0x4d, 0x6f, 0x64, 0x75,
0x6c, 0x65, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
0x61, 0xea, 0x02, 0x0f, 0x53, 0x76, 0x63, 0x3a, 0x3a, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x3a,
0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_service_module_v1_module_proto_rawDescOnce sync.Once
file_service_module_v1_module_proto_rawDescData = file_service_module_v1_module_proto_rawDesc
file_svc_module_v1_module_proto_rawDescOnce sync.Once
file_svc_module_v1_module_proto_rawDescData = file_svc_module_v1_module_proto_rawDesc
)
func file_service_module_v1_module_proto_rawDescGZIP() []byte {
file_service_module_v1_module_proto_rawDescOnce.Do(func() {
file_service_module_v1_module_proto_rawDescData = protoimpl.X.CompressGZIP(file_service_module_v1_module_proto_rawDescData)
func file_svc_module_v1_module_proto_rawDescGZIP() []byte {
file_svc_module_v1_module_proto_rawDescOnce.Do(func() {
file_svc_module_v1_module_proto_rawDescData = protoimpl.X.CompressGZIP(file_svc_module_v1_module_proto_rawDescData)
})
return file_service_module_v1_module_proto_rawDescData
return file_svc_module_v1_module_proto_rawDescData
}
var file_service_module_v1_module_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_service_module_v1_module_proto_goTypes = []interface{}{
(*Module)(nil), // 0: onsonr.sonr.service.module.v1.Module
var file_svc_module_v1_module_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_svc_module_v1_module_proto_goTypes = []interface{}{
(*Module)(nil), // 0: svc.module.v1.Module
}
var file_service_module_v1_module_proto_depIdxs = []int32{
var file_svc_module_v1_module_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for method output_type
0, // [0:0] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
@ -465,13 +458,13 @@ var file_service_module_v1_module_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for field type_name
}
func init() { file_service_module_v1_module_proto_init() }
func file_service_module_v1_module_proto_init() {
if File_service_module_v1_module_proto != nil {
func init() { file_svc_module_v1_module_proto_init() }
func file_svc_module_v1_module_proto_init() {
if File_svc_module_v1_module_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_service_module_v1_module_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
file_svc_module_v1_module_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Module); i {
case 0:
return &v.state
@ -488,18 +481,18 @@ func file_service_module_v1_module_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_service_module_v1_module_proto_rawDesc,
RawDescriptor: file_svc_module_v1_module_proto_rawDesc,
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_service_module_v1_module_proto_goTypes,
DependencyIndexes: file_service_module_v1_module_proto_depIdxs,
MessageInfos: file_service_module_v1_module_proto_msgTypes,
GoTypes: file_svc_module_v1_module_proto_goTypes,
DependencyIndexes: file_svc_module_v1_module_proto_depIdxs,
MessageInfos: file_svc_module_v1_module_proto_msgTypes,
}.Build()
File_service_module_v1_module_proto = out.File
file_service_module_v1_module_proto_rawDesc = nil
file_service_module_v1_module_proto_goTypes = nil
file_service_module_v1_module_proto_depIdxs = nil
File_svc_module_v1_module_proto = out.File
file_svc_module_v1_module_proto_rawDesc = nil
file_svc_module_v1_module_proto_goTypes = nil
file_svc_module_v1_module_proto_depIdxs = nil
}


@ -1,5 +1,5 @@
// Code generated by protoc-gen-go-pulsar. DO NOT EDIT.
package servicev1
package svcv1
import (
fmt "fmt"
@ -18,8 +18,8 @@ var (
)
func init() {
file_service_v1_query_proto_init()
md_QueryParamsRequest = File_service_v1_query_proto.Messages().ByName("QueryParamsRequest")
file_svc_v1_query_proto_init()
md_QueryParamsRequest = File_svc_v1_query_proto.Messages().ByName("QueryParamsRequest")
}
var _ protoreflect.Message = (*fastReflection_QueryParamsRequest)(nil)
@ -31,7 +31,7 @@ func (x *QueryParamsRequest) ProtoReflect() protoreflect.Message {
}
func (x *QueryParamsRequest) slowProtoReflect() protoreflect.Message {
mi := &file_service_v1_query_proto_msgTypes[0]
mi := &file_svc_v1_query_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@ -104,9 +104,9 @@ func (x *fastReflection_QueryParamsRequest) Has(fd protoreflect.FieldDescriptor)
switch fd.FullName() {
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: service.v1.QueryParamsRequest"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: svc.v1.QueryParamsRequest"))
}
panic(fmt.Errorf("message service.v1.QueryParamsRequest does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message svc.v1.QueryParamsRequest does not contain field %s", fd.FullName()))
}
}
@ -120,9 +120,9 @@ func (x *fastReflection_QueryParamsRequest) Clear(fd protoreflect.FieldDescripto
switch fd.FullName() {
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: service.v1.QueryParamsRequest"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: svc.v1.QueryParamsRequest"))
}
panic(fmt.Errorf("message service.v1.QueryParamsRequest does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message svc.v1.QueryParamsRequest does not contain field %s", fd.FullName()))
}
}
@ -136,9 +136,9 @@ func (x *fastReflection_QueryParamsRequest) Get(descriptor protoreflect.FieldDes
switch descriptor.FullName() {
default:
if descriptor.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: service.v1.QueryParamsRequest"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: svc.v1.QueryParamsRequest"))
}
panic(fmt.Errorf("message service.v1.QueryParamsRequest does not contain field %s", descriptor.FullName()))
panic(fmt.Errorf("message svc.v1.QueryParamsRequest does not contain field %s", descriptor.FullName()))
}
}
@ -156,9 +156,9 @@ func (x *fastReflection_QueryParamsRequest) Set(fd protoreflect.FieldDescriptor,
switch fd.FullName() {
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: service.v1.QueryParamsRequest"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: svc.v1.QueryParamsRequest"))
}
panic(fmt.Errorf("message service.v1.QueryParamsRequest does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message svc.v1.QueryParamsRequest does not contain field %s", fd.FullName()))
}
}
@ -176,9 +176,9 @@ func (x *fastReflection_QueryParamsRequest) Mutable(fd protoreflect.FieldDescrip
switch fd.FullName() {
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: service.v1.QueryParamsRequest"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: svc.v1.QueryParamsRequest"))
}
panic(fmt.Errorf("message service.v1.QueryParamsRequest does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message svc.v1.QueryParamsRequest does not contain field %s", fd.FullName()))
}
}
@ -189,9 +189,9 @@ func (x *fastReflection_QueryParamsRequest) NewField(fd protoreflect.FieldDescri
switch fd.FullName() {
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: service.v1.QueryParamsRequest"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: svc.v1.QueryParamsRequest"))
}
panic(fmt.Errorf("message service.v1.QueryParamsRequest does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message svc.v1.QueryParamsRequest does not contain field %s", fd.FullName()))
}
}
@ -201,7 +201,7 @@ func (x *fastReflection_QueryParamsRequest) NewField(fd protoreflect.FieldDescri
func (x *fastReflection_QueryParamsRequest) WhichOneof(d protoreflect.OneofDescriptor) protoreflect.FieldDescriptor {
switch d.FullName() {
default:
panic(fmt.Errorf("%s is not a oneof field in service.v1.QueryParamsRequest", d.FullName()))
panic(fmt.Errorf("%s is not a oneof field in svc.v1.QueryParamsRequest", d.FullName()))
}
panic("unreachable")
}
@ -375,8 +375,8 @@ var (
)
func init() {
file_service_v1_query_proto_init()
md_QueryParamsResponse = File_service_v1_query_proto.Messages().ByName("QueryParamsResponse")
file_svc_v1_query_proto_init()
md_QueryParamsResponse = File_svc_v1_query_proto.Messages().ByName("QueryParamsResponse")
fd_QueryParamsResponse_params = md_QueryParamsResponse.Fields().ByName("params")
}
@ -389,7 +389,7 @@ func (x *QueryParamsResponse) ProtoReflect() protoreflect.Message {
}
func (x *QueryParamsResponse) slowProtoReflect() protoreflect.Message {
mi := &file_service_v1_query_proto_msgTypes[1]
mi := &file_svc_v1_query_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@ -466,13 +466,13 @@ func (x *fastReflection_QueryParamsResponse) Range(f func(protoreflect.FieldDesc
// a repeated field is populated if it is non-empty.
func (x *fastReflection_QueryParamsResponse) Has(fd protoreflect.FieldDescriptor) bool {
switch fd.FullName() {
case "service.v1.QueryParamsResponse.params":
case "svc.v1.QueryParamsResponse.params":
return x.Params != nil
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: service.v1.QueryParamsResponse"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: svc.v1.QueryParamsResponse"))
}
panic(fmt.Errorf("message service.v1.QueryParamsResponse does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message svc.v1.QueryParamsResponse does not contain field %s", fd.FullName()))
}
}
@ -484,13 +484,13 @@ func (x *fastReflection_QueryParamsResponse) Has(fd protoreflect.FieldDescriptor
// Clear is a mutating operation and unsafe for concurrent use.
func (x *fastReflection_QueryParamsResponse) Clear(fd protoreflect.FieldDescriptor) {
switch fd.FullName() {
case "service.v1.QueryParamsResponse.params":
case "svc.v1.QueryParamsResponse.params":
x.Params = nil
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: service.v1.QueryParamsResponse"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: svc.v1.QueryParamsResponse"))
}
panic(fmt.Errorf("message service.v1.QueryParamsResponse does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message svc.v1.QueryParamsResponse does not contain field %s", fd.FullName()))
}
}
@ -502,14 +502,14 @@ func (x *fastReflection_QueryParamsResponse) Clear(fd protoreflect.FieldDescript
// of the value; to obtain a mutable reference, use Mutable.
func (x *fastReflection_QueryParamsResponse) Get(descriptor protoreflect.FieldDescriptor) protoreflect.Value {
switch descriptor.FullName() {
case "service.v1.QueryParamsResponse.params":
case "svc.v1.QueryParamsResponse.params":
value := x.Params
return protoreflect.ValueOfMessage(value.ProtoReflect())
default:
if descriptor.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: service.v1.QueryParamsResponse"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: svc.v1.QueryParamsResponse"))
}
panic(fmt.Errorf("message service.v1.QueryParamsResponse does not contain field %s", descriptor.FullName()))
panic(fmt.Errorf("message svc.v1.QueryParamsResponse does not contain field %s", descriptor.FullName()))
}
}
@ -525,13 +525,13 @@ func (x *fastReflection_QueryParamsResponse) Get(descriptor protoreflect.FieldDe
// Set is a mutating operation and unsafe for concurrent use.
func (x *fastReflection_QueryParamsResponse) Set(fd protoreflect.FieldDescriptor, value protoreflect.Value) {
switch fd.FullName() {
case "service.v1.QueryParamsResponse.params":
case "svc.v1.QueryParamsResponse.params":
x.Params = value.Message().Interface().(*Params)
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: service.v1.QueryParamsResponse"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: svc.v1.QueryParamsResponse"))
}
panic(fmt.Errorf("message service.v1.QueryParamsResponse does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message svc.v1.QueryParamsResponse does not contain field %s", fd.FullName()))
}
}
@ -547,16 +547,16 @@ func (x *fastReflection_QueryParamsResponse) Set(fd protoreflect.FieldDescriptor
// Mutable is a mutating operation and unsafe for concurrent use.
func (x *fastReflection_QueryParamsResponse) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value {
switch fd.FullName() {
case "service.v1.QueryParamsResponse.params":
case "svc.v1.QueryParamsResponse.params":
if x.Params == nil {
x.Params = new(Params)
}
return protoreflect.ValueOfMessage(x.Params.ProtoReflect())
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: service.v1.QueryParamsResponse"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: svc.v1.QueryParamsResponse"))
}
panic(fmt.Errorf("message service.v1.QueryParamsResponse does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message svc.v1.QueryParamsResponse does not contain field %s", fd.FullName()))
}
}
@ -565,14 +565,14 @@ func (x *fastReflection_QueryParamsResponse) Mutable(fd protoreflect.FieldDescri
// For lists, maps, and messages, this returns a new, empty, mutable value.
func (x *fastReflection_QueryParamsResponse) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value {
switch fd.FullName() {
case "service.v1.QueryParamsResponse.params":
case "svc.v1.QueryParamsResponse.params":
m := new(Params)
return protoreflect.ValueOfMessage(m.ProtoReflect())
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: service.v1.QueryParamsResponse"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: svc.v1.QueryParamsResponse"))
}
panic(fmt.Errorf("message service.v1.QueryParamsResponse does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message svc.v1.QueryParamsResponse does not contain field %s", fd.FullName()))
}
}
@ -582,7 +582,7 @@ func (x *fastReflection_QueryParamsResponse) NewField(fd protoreflect.FieldDescr
func (x *fastReflection_QueryParamsResponse) WhichOneof(d protoreflect.OneofDescriptor) protoreflect.FieldDescriptor {
switch d.FullName() {
default:
panic(fmt.Errorf("%s is not a oneof field in service.v1.QueryParamsResponse", d.FullName()))
panic(fmt.Errorf("%s is not a oneof field in svc.v1.QueryParamsResponse", d.FullName()))
}
panic("unreachable")
}
@ -808,7 +808,7 @@ func (x *fastReflection_QueryParamsResponse) ProtoMethods() *protoiface.Methods
// versions:
// protoc-gen-go v1.27.0
// protoc (unknown)
// source: service/v1/query.proto
// source: svc/v1/query.proto
const (
// Verify that this generated code is sufficiently up-to-date.
@ -827,7 +827,7 @@ type QueryParamsRequest struct {
func (x *QueryParamsRequest) Reset() {
*x = QueryParamsRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_service_v1_query_proto_msgTypes[0]
mi := &file_svc_v1_query_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@ -841,7 +841,7 @@ func (*QueryParamsRequest) ProtoMessage() {}
// Deprecated: Use QueryParamsRequest.ProtoReflect.Descriptor instead.
func (*QueryParamsRequest) Descriptor() ([]byte, []int) {
return file_service_v1_query_proto_rawDescGZIP(), []int{0}
return file_svc_v1_query_proto_rawDescGZIP(), []int{0}
}
// QueryParamsResponse is the response type for the Query/Params RPC method.
@ -857,7 +857,7 @@ type QueryParamsResponse struct {
func (x *QueryParamsResponse) Reset() {
*x = QueryParamsResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_service_v1_query_proto_msgTypes[1]
mi := &file_svc_v1_query_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@ -871,7 +871,7 @@ func (*QueryParamsResponse) ProtoMessage() {}
// Deprecated: Use QueryParamsResponse.ProtoReflect.Descriptor instead.
func (*QueryParamsResponse) Descriptor() ([]byte, []int) {
return file_service_v1_query_proto_rawDescGZIP(), []int{1}
return file_svc_v1_query_proto_rawDescGZIP(), []int{1}
}
func (x *QueryParamsResponse) GetParams() *Params {
@ -881,62 +881,58 @@ func (x *QueryParamsResponse) GetParams() *Params {
return nil
}
var File_service_v1_query_proto protoreflect.FileDescriptor
var File_svc_v1_query_proto protoreflect.FileDescriptor
var file_service_v1_query_proto_rawDesc = []byte{
0x0a, 0x16, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x71, 0x75, 0x65,
0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69,
0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x1a, 0x18, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x67,
0x65, 0x6e, 0x65, 0x73, 0x69, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x14, 0x0a, 0x12,
0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x22, 0x41, 0x0a, 0x13, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d,
0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x70, 0x61, 0x72,
0x61, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x73, 0x65, 0x72, 0x76,
0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x06, 0x70,
0x61, 0x72, 0x61, 0x6d, 0x73, 0x32, 0x6e, 0x0a, 0x05, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x65,
0x0a, 0x06, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x1e, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69,
0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d,
0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69,
0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d,
0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1a, 0x82, 0xd3, 0xe4, 0x93, 0x02,
0x14, 0x12, 0x12, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x70,
0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x96, 0x01, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x73, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0a, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50,
0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
0x6f, 0x6d, 0x2f, 0x6f, 0x6e, 0x73, 0x6f, 0x6e, 0x72, 0x2f, 0x73, 0x6f, 0x6e, 0x72, 0x2f, 0x61,
0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x73, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x53, 0x58, 0x58, 0xaa, 0x02, 0x0a,
0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x0a, 0x53, 0x65, 0x72,
0x76, 0x69, 0x63, 0x65, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
0x65, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
0xea, 0x02, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
var file_svc_v1_query_proto_rawDesc = []byte{
0x0a, 0x12, 0x73, 0x76, 0x63, 0x2f, 0x76, 0x31, 0x2f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x73, 0x76, 0x63, 0x2e, 0x76, 0x31, 0x1a, 0x1c, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x14, 0x73, 0x76, 0x63, 0x2f,
0x76, 0x31, 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x73, 0x69, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x22, 0x14, 0x0a, 0x12, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x3d, 0x0a, 0x13, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50,
0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a,
0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e,
0x73, 0x76, 0x63, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x06, 0x70,
0x61, 0x72, 0x61, 0x6d, 0x73, 0x32, 0x62, 0x0a, 0x05, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x59,
0x0a, 0x06, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x1a, 0x2e, 0x73, 0x76, 0x63, 0x2e, 0x76,
0x31, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x73, 0x76, 0x63, 0x2e, 0x76, 0x31, 0x2e, 0x51, 0x75,
0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x10, 0x12, 0x0e, 0x2f, 0x73, 0x76, 0x63, 0x2f,
0x76, 0x31, 0x2f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x7a, 0x0a, 0x0a, 0x63, 0x6f, 0x6d,
0x2e, 0x73, 0x76, 0x63, 0x2e, 0x76, 0x31, 0x42, 0x0a, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x72,
0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f,
0x6d, 0x2f, 0x6f, 0x6e, 0x73, 0x6f, 0x6e, 0x72, 0x2f, 0x73, 0x6f, 0x6e, 0x72, 0x2f, 0x61, 0x70,
0x69, 0x2f, 0x73, 0x76, 0x63, 0x2f, 0x76, 0x31, 0x3b, 0x73, 0x76, 0x63, 0x76, 0x31, 0xa2, 0x02,
0x03, 0x53, 0x58, 0x58, 0xaa, 0x02, 0x06, 0x53, 0x76, 0x63, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x06,
0x53, 0x76, 0x63, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x12, 0x53, 0x76, 0x63, 0x5c, 0x56, 0x31, 0x5c,
0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x07, 0x53, 0x76,
0x63, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_service_v1_query_proto_rawDescOnce sync.Once
file_service_v1_query_proto_rawDescData = file_service_v1_query_proto_rawDesc
file_svc_v1_query_proto_rawDescOnce sync.Once
file_svc_v1_query_proto_rawDescData = file_svc_v1_query_proto_rawDesc
)
func file_service_v1_query_proto_rawDescGZIP() []byte {
file_service_v1_query_proto_rawDescOnce.Do(func() {
file_service_v1_query_proto_rawDescData = protoimpl.X.CompressGZIP(file_service_v1_query_proto_rawDescData)
func file_svc_v1_query_proto_rawDescGZIP() []byte {
file_svc_v1_query_proto_rawDescOnce.Do(func() {
file_svc_v1_query_proto_rawDescData = protoimpl.X.CompressGZIP(file_svc_v1_query_proto_rawDescData)
})
return file_service_v1_query_proto_rawDescData
return file_svc_v1_query_proto_rawDescData
}
var file_service_v1_query_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_service_v1_query_proto_goTypes = []interface{}{
(*QueryParamsRequest)(nil), // 0: service.v1.QueryParamsRequest
(*QueryParamsResponse)(nil), // 1: service.v1.QueryParamsResponse
(*Params)(nil), // 2: service.v1.Params
var file_svc_v1_query_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_svc_v1_query_proto_goTypes = []interface{}{
(*QueryParamsRequest)(nil), // 0: svc.v1.QueryParamsRequest
(*QueryParamsResponse)(nil), // 1: svc.v1.QueryParamsResponse
(*Params)(nil), // 2: svc.v1.Params
}
var file_service_v1_query_proto_depIdxs = []int32{
2, // 0: service.v1.QueryParamsResponse.params:type_name -> service.v1.Params
0, // 1: service.v1.Query.Params:input_type -> service.v1.QueryParamsRequest
1, // 2: service.v1.Query.Params:output_type -> service.v1.QueryParamsResponse
var file_svc_v1_query_proto_depIdxs = []int32{
2, // 0: svc.v1.QueryParamsResponse.params:type_name -> svc.v1.Params
0, // 1: svc.v1.Query.Params:input_type -> svc.v1.QueryParamsRequest
1, // 2: svc.v1.Query.Params:output_type -> svc.v1.QueryParamsResponse
2, // [2:3] is the sub-list for method output_type
1, // [1:2] is the sub-list for method input_type
1, // [1:1] is the sub-list for extension type_name
@ -944,14 +940,14 @@ var file_service_v1_query_proto_depIdxs = []int32{
0, // [0:1] is the sub-list for field type_name
}
func init() { file_service_v1_query_proto_init() }
func file_service_v1_query_proto_init() {
if File_service_v1_query_proto != nil {
func init() { file_svc_v1_query_proto_init() }
func file_svc_v1_query_proto_init() {
if File_svc_v1_query_proto != nil {
return
}
file_service_v1_genesis_proto_init()
file_svc_v1_genesis_proto_init()
if !protoimpl.UnsafeEnabled {
file_service_v1_query_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
file_svc_v1_query_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*QueryParamsRequest); i {
case 0:
return &v.state
@ -963,7 +959,7 @@ func file_service_v1_query_proto_init() {
return nil
}
}
file_service_v1_query_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
file_svc_v1_query_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*QueryParamsResponse); i {
case 0:
return &v.state
@ -980,18 +976,18 @@ func file_service_v1_query_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_service_v1_query_proto_rawDesc,
RawDescriptor: file_svc_v1_query_proto_rawDesc,
NumEnums: 0,
NumMessages: 2,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_service_v1_query_proto_goTypes,
DependencyIndexes: file_service_v1_query_proto_depIdxs,
MessageInfos: file_service_v1_query_proto_msgTypes,
GoTypes: file_svc_v1_query_proto_goTypes,
DependencyIndexes: file_svc_v1_query_proto_depIdxs,
MessageInfos: file_svc_v1_query_proto_msgTypes,
}.Build()
File_service_v1_query_proto = out.File
file_service_v1_query_proto_rawDesc = nil
file_service_v1_query_proto_goTypes = nil
file_service_v1_query_proto_depIdxs = nil
File_svc_v1_query_proto = out.File
file_svc_v1_query_proto_rawDesc = nil
file_svc_v1_query_proto_goTypes = nil
file_svc_v1_query_proto_depIdxs = nil
}
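The fastReflection plumbing above backs the standard protoreflect API, so the package rename is observable at runtime through descriptor full names. A quick illustrative check (import path taken from the go_package option embedded in the descriptor above):

```go
package main

import (
	"fmt"

	svcv1 "github.com/onsonr/sonr/api/svc/v1"
)

func main() {
	md := (&svcv1.QueryParamsResponse{}).ProtoReflect().Descriptor()
	fmt.Println(md.FullName())                           // svc.v1.QueryParamsResponse
	fmt.Println(md.Fields().ByName("params").FullName()) // svc.v1.QueryParamsResponse.params
}
```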

View File

@ -2,9 +2,9 @@
// versions:
// - protoc-gen-go-grpc v1.5.1
// - protoc (unknown)
// source: service/v1/query.proto
// source: svc/v1/query.proto
package servicev1
package svcv1
import (
context "context"
@ -19,7 +19,7 @@ import (
const _ = grpc.SupportPackageIsVersion9
const (
Query_Params_FullMethodName = "/service.v1.Query/Params"
Query_Params_FullMethodName = "/svc.v1.Query/Params"
)
// QueryClient is the client API for Query service.
@ -114,7 +114,7 @@ func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interf
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var Query_ServiceDesc = grpc.ServiceDesc{
ServiceName: "service.v1.Query",
ServiceName: "svc.v1.Query",
HandlerType: (*QueryServer)(nil),
Methods: []grpc.MethodDesc{
{
@ -123,5 +123,5 @@ var Query_ServiceDesc = grpc.ServiceDesc{
},
},
Streams: []grpc.StreamDesc{},
Metadata: "service/v1/query.proto",
Metadata: "svc/v1/query.proto",
}
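Callers reach the renamed Query service through the generated client in the usual grpc-go way; only the import path and full method name change. A minimal sketch, assuming a node serving gRPC locally (the address is illustrative):

```go
package main

import (
	"context"
	"log"

	svcv1 "github.com/onsonr/sonr/api/svc/v1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial("localhost:9090", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Invokes /svc.v1.Query/Params per the ServiceDesc above.
	resp, err := svcv1.NewQueryClient(conn).Params(context.Background(), &svcv1.QueryParamsRequest{})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("params: %v", resp.GetParams())
}
```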

View File

@ -1,6 +1,6 @@
// Code generated by protoc-gen-go-cosmos-orm. DO NOT EDIT.
package servicev1
package svcv1
import (
context "context"
@ -9,19 +9,177 @@ import (
ormerrors "cosmossdk.io/orm/types/ormerrors"
)
type DomainTable interface {
Insert(ctx context.Context, domain *Domain) error
InsertReturningId(ctx context.Context, domain *Domain) (uint64, error)
LastInsertedSequence(ctx context.Context) (uint64, error)
Update(ctx context.Context, domain *Domain) error
Save(ctx context.Context, domain *Domain) error
Delete(ctx context.Context, domain *Domain) error
Has(ctx context.Context, id uint64) (found bool, err error)
// Get returns nil and an error which responds true to ormerrors.IsNotFound() if the record was not found.
Get(ctx context.Context, id uint64) (*Domain, error)
HasByOrigin(ctx context.Context, origin string) (found bool, err error)
// GetByOrigin returns nil and an error which responds true to ormerrors.IsNotFound() if the record was not found.
GetByOrigin(ctx context.Context, origin string) (*Domain, error)
List(ctx context.Context, prefixKey DomainIndexKey, opts ...ormlist.Option) (DomainIterator, error)
ListRange(ctx context.Context, from, to DomainIndexKey, opts ...ormlist.Option) (DomainIterator, error)
DeleteBy(ctx context.Context, prefixKey DomainIndexKey) error
DeleteRange(ctx context.Context, from, to DomainIndexKey) error
doNotImplement()
}
type DomainIterator struct {
ormtable.Iterator
}
func (i DomainIterator) Value() (*Domain, error) {
var domain Domain
err := i.UnmarshalMessage(&domain)
return &domain, err
}
type DomainIndexKey interface {
id() uint32
values() []interface{}
domainIndexKey()
}
// primary key starting index.
type DomainPrimaryKey = DomainIdIndexKey
type DomainIdIndexKey struct {
vs []interface{}
}
func (x DomainIdIndexKey) id() uint32 { return 0 }
func (x DomainIdIndexKey) values() []interface{} { return x.vs }
func (x DomainIdIndexKey) domainIndexKey() {}
func (this DomainIdIndexKey) WithId(id uint64) DomainIdIndexKey {
this.vs = []interface{}{id}
return this
}
type DomainOriginIndexKey struct {
vs []interface{}
}
func (x DomainOriginIndexKey) id() uint32 { return 1 }
func (x DomainOriginIndexKey) values() []interface{} { return x.vs }
func (x DomainOriginIndexKey) domainIndexKey() {}
func (this DomainOriginIndexKey) WithOrigin(origin string) DomainOriginIndexKey {
this.vs = []interface{}{origin}
return this
}
type domainTable struct {
table ormtable.AutoIncrementTable
}
func (this domainTable) Insert(ctx context.Context, domain *Domain) error {
return this.table.Insert(ctx, domain)
}
func (this domainTable) Update(ctx context.Context, domain *Domain) error {
return this.table.Update(ctx, domain)
}
func (this domainTable) Save(ctx context.Context, domain *Domain) error {
return this.table.Save(ctx, domain)
}
func (this domainTable) Delete(ctx context.Context, domain *Domain) error {
return this.table.Delete(ctx, domain)
}
func (this domainTable) InsertReturningId(ctx context.Context, domain *Domain) (uint64, error) {
return this.table.InsertReturningPKey(ctx, domain)
}
func (this domainTable) LastInsertedSequence(ctx context.Context) (uint64, error) {
return this.table.LastInsertedSequence(ctx)
}
func (this domainTable) Has(ctx context.Context, id uint64) (found bool, err error) {
return this.table.PrimaryKey().Has(ctx, id)
}
func (this domainTable) Get(ctx context.Context, id uint64) (*Domain, error) {
var domain Domain
found, err := this.table.PrimaryKey().Get(ctx, &domain, id)
if err != nil {
return nil, err
}
if !found {
return nil, ormerrors.NotFound
}
return &domain, nil
}
func (this domainTable) HasByOrigin(ctx context.Context, origin string) (found bool, err error) {
return this.table.GetIndexByID(1).(ormtable.UniqueIndex).Has(ctx,
origin,
)
}
func (this domainTable) GetByOrigin(ctx context.Context, origin string) (*Domain, error) {
var domain Domain
found, err := this.table.GetIndexByID(1).(ormtable.UniqueIndex).Get(ctx, &domain,
origin,
)
if err != nil {
return nil, err
}
if !found {
return nil, ormerrors.NotFound
}
return &domain, nil
}
func (this domainTable) List(ctx context.Context, prefixKey DomainIndexKey, opts ...ormlist.Option) (DomainIterator, error) {
it, err := this.table.GetIndexByID(prefixKey.id()).List(ctx, prefixKey.values(), opts...)
return DomainIterator{it}, err
}
func (this domainTable) ListRange(ctx context.Context, from, to DomainIndexKey, opts ...ormlist.Option) (DomainIterator, error) {
it, err := this.table.GetIndexByID(from.id()).ListRange(ctx, from.values(), to.values(), opts...)
return DomainIterator{it}, err
}
func (this domainTable) DeleteBy(ctx context.Context, prefixKey DomainIndexKey) error {
return this.table.GetIndexByID(prefixKey.id()).DeleteBy(ctx, prefixKey.values()...)
}
func (this domainTable) DeleteRange(ctx context.Context, from, to DomainIndexKey) error {
return this.table.GetIndexByID(from.id()).DeleteRange(ctx, from.values(), to.values())
}
func (this domainTable) doNotImplement() {}
var _ DomainTable = domainTable{}
func NewDomainTable(db ormtable.Schema) (DomainTable, error) {
table := db.GetTable(&Domain{})
if table == nil {
return nil, ormerrors.TableNotFound.Wrap(string((&Domain{}).ProtoReflect().Descriptor().FullName()))
}
return domainTable{table.(ormtable.AutoIncrementTable)}, nil
}
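DomainTable wraps an auto-increment ORM table with typed accessors plus a unique index on origin. A minimal usage sketch; the helper is illustrative, not generated, and the Origin field name is an assumption about the Domain message:

```go
package svcv1

import (
	"context"

	"cosmossdk.io/orm/model/ormtable"
)

// lookupDomain is an illustrative helper showing the generated API.
func lookupDomain(ctx context.Context, db ormtable.Schema) (*Domain, error) {
	domains, err := NewDomainTable(db)
	if err != nil {
		return nil, err
	}
	// InsertReturningId hands back the auto-incremented primary key.
	if _, err := domains.InsertReturningId(ctx, &Domain{Origin: "example.com"}); err != nil {
		return nil, err
	}
	// GetByOrigin uses the unique index (id 1) shown above; when absent it
	// returns an error that responds true to ormerrors.IsNotFound().
	return domains.GetByOrigin(ctx, "example.com")
}
```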
type MetadataTable interface {
Insert(ctx context.Context, metadata *Metadata) error
InsertReturningId(ctx context.Context, metadata *Metadata) (uint64, error)
LastInsertedSequence(ctx context.Context) (uint64, error)
Update(ctx context.Context, metadata *Metadata) error
Save(ctx context.Context, metadata *Metadata) error
Delete(ctx context.Context, metadata *Metadata) error
Has(ctx context.Context, id uint64) (found bool, err error)
Has(ctx context.Context, id string) (found bool, err error)
// Get returns nil and an error which responds true to ormerrors.IsNotFound() if the record was not found.
Get(ctx context.Context, id uint64) (*Metadata, error)
HasByOrigin(ctx context.Context, origin string) (found bool, err error)
// GetByOrigin returns nil and an error which responds true to ormerrors.IsNotFound() if the record was not found.
GetByOrigin(ctx context.Context, origin string) (*Metadata, error)
Get(ctx context.Context, id string) (*Metadata, error)
HasBySubjectOrigin(ctx context.Context, subject string, origin string) (found bool, err error)
// GetBySubjectOrigin returns nil and an error which responds true to ormerrors.IsNotFound() if the record was not found.
GetBySubjectOrigin(ctx context.Context, subject string, origin string) (*Metadata, error)
List(ctx context.Context, prefixKey MetadataIndexKey, opts ...ormlist.Option) (MetadataIterator, error)
ListRange(ctx context.Context, from, to MetadataIndexKey, opts ...ormlist.Option) (MetadataIterator, error)
DeleteBy(ctx context.Context, prefixKey MetadataIndexKey) error
@ -57,26 +215,31 @@ func (x MetadataIdIndexKey) id() uint32 { return 0 }
func (x MetadataIdIndexKey) values() []interface{} { return x.vs }
func (x MetadataIdIndexKey) metadataIndexKey() {}
func (this MetadataIdIndexKey) WithId(id uint64) MetadataIdIndexKey {
func (this MetadataIdIndexKey) WithId(id string) MetadataIdIndexKey {
this.vs = []interface{}{id}
return this
}
type MetadataOriginIndexKey struct {
type MetadataSubjectOriginIndexKey struct {
vs []interface{}
}
func (x MetadataOriginIndexKey) id() uint32 { return 1 }
func (x MetadataOriginIndexKey) values() []interface{} { return x.vs }
func (x MetadataOriginIndexKey) metadataIndexKey() {}
func (x MetadataSubjectOriginIndexKey) id() uint32 { return 1 }
func (x MetadataSubjectOriginIndexKey) values() []interface{} { return x.vs }
func (x MetadataSubjectOriginIndexKey) metadataIndexKey() {}
func (this MetadataOriginIndexKey) WithOrigin(origin string) MetadataOriginIndexKey {
this.vs = []interface{}{origin}
func (this MetadataSubjectOriginIndexKey) WithSubject(subject string) MetadataSubjectOriginIndexKey {
this.vs = []interface{}{subject}
return this
}
func (this MetadataSubjectOriginIndexKey) WithSubjectOrigin(subject string, origin string) MetadataSubjectOriginIndexKey {
this.vs = []interface{}{subject, origin}
return this
}
type metadataTable struct {
table ormtable.AutoIncrementTable
table ormtable.Table
}
func (this metadataTable) Insert(ctx context.Context, metadata *Metadata) error {
@ -95,19 +258,11 @@ func (this metadataTable) Delete(ctx context.Context, metadata *Metadata) error
return this.table.Delete(ctx, metadata)
}
func (this metadataTable) InsertReturningId(ctx context.Context, metadata *Metadata) (uint64, error) {
return this.table.InsertReturningPKey(ctx, metadata)
}
func (this metadataTable) LastInsertedSequence(ctx context.Context) (uint64, error) {
return this.table.LastInsertedSequence(ctx)
}
func (this metadataTable) Has(ctx context.Context, id uint64) (found bool, err error) {
func (this metadataTable) Has(ctx context.Context, id string) (found bool, err error) {
return this.table.PrimaryKey().Has(ctx, id)
}
func (this metadataTable) Get(ctx context.Context, id uint64) (*Metadata, error) {
func (this metadataTable) Get(ctx context.Context, id string) (*Metadata, error) {
var metadata Metadata
found, err := this.table.PrimaryKey().Get(ctx, &metadata, id)
if err != nil {
@ -119,15 +274,17 @@ func (this metadataTable) Get(ctx context.Context, id uint64) (*Metadata, error)
return &metadata, nil
}
func (this metadataTable) HasByOrigin(ctx context.Context, origin string) (found bool, err error) {
func (this metadataTable) HasBySubjectOrigin(ctx context.Context, subject string, origin string) (found bool, err error) {
return this.table.GetIndexByID(1).(ormtable.UniqueIndex).Has(ctx,
subject,
origin,
)
}
func (this metadataTable) GetByOrigin(ctx context.Context, origin string) (*Metadata, error) {
func (this metadataTable) GetBySubjectOrigin(ctx context.Context, subject string, origin string) (*Metadata, error) {
var metadata Metadata
found, err := this.table.GetIndexByID(1).(ormtable.UniqueIndex).Get(ctx, &metadata,
subject,
origin,
)
if err != nil {
@ -166,203 +323,46 @@ func NewMetadataTable(db ormtable.Schema) (MetadataTable, error) {
if table == nil {
return nil, ormerrors.TableNotFound.Wrap(string((&Metadata{}).ProtoReflect().Descriptor().FullName()))
}
return metadataTable{table.(ormtable.AutoIncrementTable)}, nil
}
type ProfileTable interface {
Insert(ctx context.Context, profile *Profile) error
Update(ctx context.Context, profile *Profile) error
Save(ctx context.Context, profile *Profile) error
Delete(ctx context.Context, profile *Profile) error
Has(ctx context.Context, id string) (found bool, err error)
// Get returns nil and an error which responds true to ormerrors.IsNotFound() if the record was not found.
Get(ctx context.Context, id string) (*Profile, error)
HasBySubjectOrigin(ctx context.Context, subject string, origin string) (found bool, err error)
// GetBySubjectOrigin returns nil and an error which responds true to ormerrors.IsNotFound() if the record was not found.
GetBySubjectOrigin(ctx context.Context, subject string, origin string) (*Profile, error)
List(ctx context.Context, prefixKey ProfileIndexKey, opts ...ormlist.Option) (ProfileIterator, error)
ListRange(ctx context.Context, from, to ProfileIndexKey, opts ...ormlist.Option) (ProfileIterator, error)
DeleteBy(ctx context.Context, prefixKey ProfileIndexKey) error
DeleteRange(ctx context.Context, from, to ProfileIndexKey) error
doNotImplement()
}
type ProfileIterator struct {
ormtable.Iterator
}
func (i ProfileIterator) Value() (*Profile, error) {
var profile Profile
err := i.UnmarshalMessage(&profile)
return &profile, err
}
type ProfileIndexKey interface {
id() uint32
values() []interface{}
profileIndexKey()
}
// primary key starting index.
type ProfilePrimaryKey = ProfileIdIndexKey
type ProfileIdIndexKey struct {
vs []interface{}
}
func (x ProfileIdIndexKey) id() uint32 { return 0 }
func (x ProfileIdIndexKey) values() []interface{} { return x.vs }
func (x ProfileIdIndexKey) profileIndexKey() {}
func (this ProfileIdIndexKey) WithId(id string) ProfileIdIndexKey {
this.vs = []interface{}{id}
return this
}
type ProfileSubjectOriginIndexKey struct {
vs []interface{}
}
func (x ProfileSubjectOriginIndexKey) id() uint32 { return 1 }
func (x ProfileSubjectOriginIndexKey) values() []interface{} { return x.vs }
func (x ProfileSubjectOriginIndexKey) profileIndexKey() {}
func (this ProfileSubjectOriginIndexKey) WithSubject(subject string) ProfileSubjectOriginIndexKey {
this.vs = []interface{}{subject}
return this
}
func (this ProfileSubjectOriginIndexKey) WithSubjectOrigin(subject string, origin string) ProfileSubjectOriginIndexKey {
this.vs = []interface{}{subject, origin}
return this
}
type profileTable struct {
table ormtable.Table
}
func (this profileTable) Insert(ctx context.Context, profile *Profile) error {
return this.table.Insert(ctx, profile)
}
func (this profileTable) Update(ctx context.Context, profile *Profile) error {
return this.table.Update(ctx, profile)
}
func (this profileTable) Save(ctx context.Context, profile *Profile) error {
return this.table.Save(ctx, profile)
}
func (this profileTable) Delete(ctx context.Context, profile *Profile) error {
return this.table.Delete(ctx, profile)
}
func (this profileTable) Has(ctx context.Context, id string) (found bool, err error) {
return this.table.PrimaryKey().Has(ctx, id)
}
func (this profileTable) Get(ctx context.Context, id string) (*Profile, error) {
var profile Profile
found, err := this.table.PrimaryKey().Get(ctx, &profile, id)
if err != nil {
return nil, err
}
if !found {
return nil, ormerrors.NotFound
}
return &profile, nil
}
func (this profileTable) HasBySubjectOrigin(ctx context.Context, subject string, origin string) (found bool, err error) {
return this.table.GetIndexByID(1).(ormtable.UniqueIndex).Has(ctx,
subject,
origin,
)
}
func (this profileTable) GetBySubjectOrigin(ctx context.Context, subject string, origin string) (*Profile, error) {
var profile Profile
found, err := this.table.GetIndexByID(1).(ormtable.UniqueIndex).Get(ctx, &profile,
subject,
origin,
)
if err != nil {
return nil, err
}
if !found {
return nil, ormerrors.NotFound
}
return &profile, nil
}
func (this profileTable) List(ctx context.Context, prefixKey ProfileIndexKey, opts ...ormlist.Option) (ProfileIterator, error) {
it, err := this.table.GetIndexByID(prefixKey.id()).List(ctx, prefixKey.values(), opts...)
return ProfileIterator{it}, err
}
func (this profileTable) ListRange(ctx context.Context, from, to ProfileIndexKey, opts ...ormlist.Option) (ProfileIterator, error) {
it, err := this.table.GetIndexByID(from.id()).ListRange(ctx, from.values(), to.values(), opts...)
return ProfileIterator{it}, err
}
func (this profileTable) DeleteBy(ctx context.Context, prefixKey ProfileIndexKey) error {
return this.table.GetIndexByID(prefixKey.id()).DeleteBy(ctx, prefixKey.values()...)
}
func (this profileTable) DeleteRange(ctx context.Context, from, to ProfileIndexKey) error {
return this.table.GetIndexByID(from.id()).DeleteRange(ctx, from.values(), to.values())
}
func (this profileTable) doNotImplement() {}
var _ ProfileTable = profileTable{}
func NewProfileTable(db ormtable.Schema) (ProfileTable, error) {
table := db.GetTable(&Profile{})
if table == nil {
return nil, ormerrors.TableNotFound.Wrap(string((&Profile{}).ProtoReflect().Descriptor().FullName()))
}
return profileTable{table}, nil
return metadataTable{table}, nil
}
type StateStore interface {
DomainTable() DomainTable
MetadataTable() MetadataTable
ProfileTable() ProfileTable
doNotImplement()
}
type stateStore struct {
domain DomainTable
metadata MetadataTable
profile ProfileTable
}
func (x stateStore) DomainTable() DomainTable {
return x.domain
}
func (x stateStore) MetadataTable() MetadataTable {
return x.metadata
}
func (x stateStore) ProfileTable() ProfileTable {
return x.profile
}
func (stateStore) doNotImplement() {}
var _ StateStore = stateStore{}
func NewStateStore(db ormtable.Schema) (StateStore, error) {
domainTable, err := NewDomainTable(db)
if err != nil {
return nil, err
}
metadataTable, err := NewMetadataTable(db)
if err != nil {
return nil, err
}
profileTable, err := NewProfileTable(db)
if err != nil {
return nil, err
}
return stateStore{
domainTable,
metadataTable,
profileTable,
}, nil
}
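NewStateStore resolves every generated table from one ormtable.Schema and fails fast if a table is missing from the file's ORM descriptor. A short sketch of the access pattern after this diff, where Metadata is keyed by a string id with a unique (subject, origin) index; the helper itself is illustrative:

```go
package svcv1

import (
	"context"

	"cosmossdk.io/orm/model/ormtable"
)

// hasOrigin is illustrative: StateStore -> typed table -> indexed lookup.
func hasOrigin(ctx context.Context, db ormtable.Schema, subject, origin string) (bool, error) {
	store, err := NewStateStore(db)
	if err != nil {
		return false, err
	}
	return store.MetadataTable().HasBySubjectOrigin(ctx, subject, origin)
}
```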

File diff suppressed because it is too large

View File

@ -1,5 +1,5 @@
// Code generated by protoc-gen-go-pulsar. DO NOT EDIT.
package servicev1
package svcv1
import (
_ "cosmossdk.io/api/cosmos/msg/v1"
@ -22,8 +22,8 @@ var (
)
func init() {
file_service_v1_tx_proto_init()
md_MsgUpdateParams = File_service_v1_tx_proto.Messages().ByName("MsgUpdateParams")
file_svc_v1_tx_proto_init()
md_MsgUpdateParams = File_svc_v1_tx_proto.Messages().ByName("MsgUpdateParams")
fd_MsgUpdateParams_authority = md_MsgUpdateParams.Fields().ByName("authority")
fd_MsgUpdateParams_params = md_MsgUpdateParams.Fields().ByName("params")
}
@ -37,7 +37,7 @@ func (x *MsgUpdateParams) ProtoReflect() protoreflect.Message {
}
func (x *MsgUpdateParams) slowProtoReflect() protoreflect.Message {
mi := &file_service_v1_tx_proto_msgTypes[0]
mi := &file_svc_v1_tx_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@ -120,15 +120,15 @@ func (x *fastReflection_MsgUpdateParams) Range(f func(protoreflect.FieldDescript
// a repeated field is populated if it is non-empty.
func (x *fastReflection_MsgUpdateParams) Has(fd protoreflect.FieldDescriptor) bool {
switch fd.FullName() {
case "service.v1.MsgUpdateParams.authority":
case "svc.v1.MsgUpdateParams.authority":
return x.Authority != ""
case "service.v1.MsgUpdateParams.params":
case "svc.v1.MsgUpdateParams.params":
return x.Params != nil
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: service.v1.MsgUpdateParams"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: svc.v1.MsgUpdateParams"))
}
panic(fmt.Errorf("message service.v1.MsgUpdateParams does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message svc.v1.MsgUpdateParams does not contain field %s", fd.FullName()))
}
}
@ -140,15 +140,15 @@ func (x *fastReflection_MsgUpdateParams) Has(fd protoreflect.FieldDescriptor) bo
// Clear is a mutating operation and unsafe for concurrent use.
func (x *fastReflection_MsgUpdateParams) Clear(fd protoreflect.FieldDescriptor) {
switch fd.FullName() {
case "service.v1.MsgUpdateParams.authority":
case "svc.v1.MsgUpdateParams.authority":
x.Authority = ""
case "service.v1.MsgUpdateParams.params":
case "svc.v1.MsgUpdateParams.params":
x.Params = nil
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: service.v1.MsgUpdateParams"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: svc.v1.MsgUpdateParams"))
}
panic(fmt.Errorf("message service.v1.MsgUpdateParams does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message svc.v1.MsgUpdateParams does not contain field %s", fd.FullName()))
}
}
@ -160,17 +160,17 @@ func (x *fastReflection_MsgUpdateParams) Clear(fd protoreflect.FieldDescriptor)
// of the value; to obtain a mutable reference, use Mutable.
func (x *fastReflection_MsgUpdateParams) Get(descriptor protoreflect.FieldDescriptor) protoreflect.Value {
switch descriptor.FullName() {
case "service.v1.MsgUpdateParams.authority":
case "svc.v1.MsgUpdateParams.authority":
value := x.Authority
return protoreflect.ValueOfString(value)
case "service.v1.MsgUpdateParams.params":
case "svc.v1.MsgUpdateParams.params":
value := x.Params
return protoreflect.ValueOfMessage(value.ProtoReflect())
default:
if descriptor.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: service.v1.MsgUpdateParams"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: svc.v1.MsgUpdateParams"))
}
panic(fmt.Errorf("message service.v1.MsgUpdateParams does not contain field %s", descriptor.FullName()))
panic(fmt.Errorf("message svc.v1.MsgUpdateParams does not contain field %s", descriptor.FullName()))
}
}
@ -186,15 +186,15 @@ func (x *fastReflection_MsgUpdateParams) Get(descriptor protoreflect.FieldDescri
// Set is a mutating operation and unsafe for concurrent use.
func (x *fastReflection_MsgUpdateParams) Set(fd protoreflect.FieldDescriptor, value protoreflect.Value) {
switch fd.FullName() {
case "service.v1.MsgUpdateParams.authority":
case "svc.v1.MsgUpdateParams.authority":
x.Authority = value.Interface().(string)
case "service.v1.MsgUpdateParams.params":
case "svc.v1.MsgUpdateParams.params":
x.Params = value.Message().Interface().(*Params)
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: service.v1.MsgUpdateParams"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: svc.v1.MsgUpdateParams"))
}
panic(fmt.Errorf("message service.v1.MsgUpdateParams does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message svc.v1.MsgUpdateParams does not contain field %s", fd.FullName()))
}
}
@ -210,18 +210,18 @@ func (x *fastReflection_MsgUpdateParams) Set(fd protoreflect.FieldDescriptor, va
// Mutable is a mutating operation and unsafe for concurrent use.
func (x *fastReflection_MsgUpdateParams) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value {
switch fd.FullName() {
case "service.v1.MsgUpdateParams.params":
case "svc.v1.MsgUpdateParams.params":
if x.Params == nil {
x.Params = new(Params)
}
return protoreflect.ValueOfMessage(x.Params.ProtoReflect())
case "service.v1.MsgUpdateParams.authority":
panic(fmt.Errorf("field authority of message service.v1.MsgUpdateParams is not mutable"))
case "svc.v1.MsgUpdateParams.authority":
panic(fmt.Errorf("field authority of message svc.v1.MsgUpdateParams is not mutable"))
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: service.v1.MsgUpdateParams"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: svc.v1.MsgUpdateParams"))
}
panic(fmt.Errorf("message service.v1.MsgUpdateParams does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message svc.v1.MsgUpdateParams does not contain field %s", fd.FullName()))
}
}
@ -230,16 +230,16 @@ func (x *fastReflection_MsgUpdateParams) Mutable(fd protoreflect.FieldDescriptor
// For lists, maps, and messages, this returns a new, empty, mutable value.
func (x *fastReflection_MsgUpdateParams) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value {
switch fd.FullName() {
case "service.v1.MsgUpdateParams.authority":
case "svc.v1.MsgUpdateParams.authority":
return protoreflect.ValueOfString("")
case "service.v1.MsgUpdateParams.params":
case "svc.v1.MsgUpdateParams.params":
m := new(Params)
return protoreflect.ValueOfMessage(m.ProtoReflect())
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: service.v1.MsgUpdateParams"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: svc.v1.MsgUpdateParams"))
}
panic(fmt.Errorf("message service.v1.MsgUpdateParams does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message svc.v1.MsgUpdateParams does not contain field %s", fd.FullName()))
}
}
@ -249,7 +249,7 @@ func (x *fastReflection_MsgUpdateParams) NewField(fd protoreflect.FieldDescripto
func (x *fastReflection_MsgUpdateParams) WhichOneof(d protoreflect.OneofDescriptor) protoreflect.FieldDescriptor {
switch d.FullName() {
default:
panic(fmt.Errorf("%s is not a oneof field in service.v1.MsgUpdateParams", d.FullName()))
panic(fmt.Errorf("%s is not a oneof field in svc.v1.MsgUpdateParams", d.FullName()))
}
panic("unreachable")
}
@ -519,8 +519,8 @@ var (
)
func init() {
file_service_v1_tx_proto_init()
md_MsgUpdateParamsResponse = File_service_v1_tx_proto.Messages().ByName("MsgUpdateParamsResponse")
file_svc_v1_tx_proto_init()
md_MsgUpdateParamsResponse = File_svc_v1_tx_proto.Messages().ByName("MsgUpdateParamsResponse")
}
var _ protoreflect.Message = (*fastReflection_MsgUpdateParamsResponse)(nil)
@ -532,7 +532,7 @@ func (x *MsgUpdateParamsResponse) ProtoReflect() protoreflect.Message {
}
func (x *MsgUpdateParamsResponse) slowProtoReflect() protoreflect.Message {
mi := &file_service_v1_tx_proto_msgTypes[1]
mi := &file_svc_v1_tx_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@ -605,9 +605,9 @@ func (x *fastReflection_MsgUpdateParamsResponse) Has(fd protoreflect.FieldDescri
switch fd.FullName() {
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: service.v1.MsgUpdateParamsResponse"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: svc.v1.MsgUpdateParamsResponse"))
}
panic(fmt.Errorf("message service.v1.MsgUpdateParamsResponse does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message svc.v1.MsgUpdateParamsResponse does not contain field %s", fd.FullName()))
}
}
@ -621,9 +621,9 @@ func (x *fastReflection_MsgUpdateParamsResponse) Clear(fd protoreflect.FieldDesc
switch fd.FullName() {
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: service.v1.MsgUpdateParamsResponse"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: svc.v1.MsgUpdateParamsResponse"))
}
panic(fmt.Errorf("message service.v1.MsgUpdateParamsResponse does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message svc.v1.MsgUpdateParamsResponse does not contain field %s", fd.FullName()))
}
}
@ -637,9 +637,9 @@ func (x *fastReflection_MsgUpdateParamsResponse) Get(descriptor protoreflect.Fie
switch descriptor.FullName() {
default:
if descriptor.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: service.v1.MsgUpdateParamsResponse"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: svc.v1.MsgUpdateParamsResponse"))
}
panic(fmt.Errorf("message service.v1.MsgUpdateParamsResponse does not contain field %s", descriptor.FullName()))
panic(fmt.Errorf("message svc.v1.MsgUpdateParamsResponse does not contain field %s", descriptor.FullName()))
}
}
@ -657,9 +657,9 @@ func (x *fastReflection_MsgUpdateParamsResponse) Set(fd protoreflect.FieldDescri
switch fd.FullName() {
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: service.v1.MsgUpdateParamsResponse"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: svc.v1.MsgUpdateParamsResponse"))
}
panic(fmt.Errorf("message service.v1.MsgUpdateParamsResponse does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message svc.v1.MsgUpdateParamsResponse does not contain field %s", fd.FullName()))
}
}
@ -677,9 +677,9 @@ func (x *fastReflection_MsgUpdateParamsResponse) Mutable(fd protoreflect.FieldDe
switch fd.FullName() {
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: service.v1.MsgUpdateParamsResponse"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: svc.v1.MsgUpdateParamsResponse"))
}
panic(fmt.Errorf("message service.v1.MsgUpdateParamsResponse does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message svc.v1.MsgUpdateParamsResponse does not contain field %s", fd.FullName()))
}
}
@ -690,9 +690,9 @@ func (x *fastReflection_MsgUpdateParamsResponse) NewField(fd protoreflect.FieldD
switch fd.FullName() {
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: service.v1.MsgUpdateParamsResponse"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: svc.v1.MsgUpdateParamsResponse"))
}
panic(fmt.Errorf("message service.v1.MsgUpdateParamsResponse does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message svc.v1.MsgUpdateParamsResponse does not contain field %s", fd.FullName()))
}
}
@ -702,7 +702,7 @@ func (x *fastReflection_MsgUpdateParamsResponse) NewField(fd protoreflect.FieldD
func (x *fastReflection_MsgUpdateParamsResponse) WhichOneof(d protoreflect.OneofDescriptor) protoreflect.FieldDescriptor {
switch d.FullName() {
default:
panic(fmt.Errorf("%s is not a oneof field in service.v1.MsgUpdateParamsResponse", d.FullName()))
panic(fmt.Errorf("%s is not a oneof field in svc.v1.MsgUpdateParamsResponse", d.FullName()))
}
panic("unreachable")
}
@ -877,8 +877,8 @@ var (
)
func init() {
file_service_v1_tx_proto_init()
md_MsgRegisterService = File_service_v1_tx_proto.Messages().ByName("MsgRegisterService")
file_svc_v1_tx_proto_init()
md_MsgRegisterService = File_svc_v1_tx_proto.Messages().ByName("MsgRegisterService")
fd_MsgRegisterService_controller = md_MsgRegisterService.Fields().ByName("controller")
fd_MsgRegisterService_service = md_MsgRegisterService.Fields().ByName("service")
}
@ -892,7 +892,7 @@ func (x *MsgRegisterService) ProtoReflect() protoreflect.Message {
}
func (x *MsgRegisterService) slowProtoReflect() protoreflect.Message {
mi := &file_service_v1_tx_proto_msgTypes[2]
mi := &file_svc_v1_tx_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@ -975,15 +975,15 @@ func (x *fastReflection_MsgRegisterService) Range(f func(protoreflect.FieldDescr
// a repeated field is populated if it is non-empty.
func (x *fastReflection_MsgRegisterService) Has(fd protoreflect.FieldDescriptor) bool {
switch fd.FullName() {
case "service.v1.MsgRegisterService.controller":
case "svc.v1.MsgRegisterService.controller":
return x.Controller != ""
case "service.v1.MsgRegisterService.service":
case "svc.v1.MsgRegisterService.service":
return x.Service != nil
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: service.v1.MsgRegisterService"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: svc.v1.MsgRegisterService"))
}
panic(fmt.Errorf("message service.v1.MsgRegisterService does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message svc.v1.MsgRegisterService does not contain field %s", fd.FullName()))
}
}
@ -995,15 +995,15 @@ func (x *fastReflection_MsgRegisterService) Has(fd protoreflect.FieldDescriptor)
// Clear is a mutating operation and unsafe for concurrent use.
func (x *fastReflection_MsgRegisterService) Clear(fd protoreflect.FieldDescriptor) {
switch fd.FullName() {
case "service.v1.MsgRegisterService.controller":
case "svc.v1.MsgRegisterService.controller":
x.Controller = ""
case "service.v1.MsgRegisterService.service":
case "svc.v1.MsgRegisterService.service":
x.Service = nil
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: service.v1.MsgRegisterService"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: svc.v1.MsgRegisterService"))
}
panic(fmt.Errorf("message service.v1.MsgRegisterService does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message svc.v1.MsgRegisterService does not contain field %s", fd.FullName()))
}
}
@ -1015,17 +1015,17 @@ func (x *fastReflection_MsgRegisterService) Clear(fd protoreflect.FieldDescripto
// of the value; to obtain a mutable reference, use Mutable.
func (x *fastReflection_MsgRegisterService) Get(descriptor protoreflect.FieldDescriptor) protoreflect.Value {
switch descriptor.FullName() {
case "service.v1.MsgRegisterService.controller":
case "svc.v1.MsgRegisterService.controller":
value := x.Controller
return protoreflect.ValueOfString(value)
case "service.v1.MsgRegisterService.service":
case "svc.v1.MsgRegisterService.service":
value := x.Service
return protoreflect.ValueOfMessage(value.ProtoReflect())
default:
if descriptor.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: service.v1.MsgRegisterService"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: svc.v1.MsgRegisterService"))
}
panic(fmt.Errorf("message service.v1.MsgRegisterService does not contain field %s", descriptor.FullName()))
panic(fmt.Errorf("message svc.v1.MsgRegisterService does not contain field %s", descriptor.FullName()))
}
}
@ -1041,15 +1041,15 @@ func (x *fastReflection_MsgRegisterService) Get(descriptor protoreflect.FieldDes
// Set is a mutating operation and unsafe for concurrent use.
func (x *fastReflection_MsgRegisterService) Set(fd protoreflect.FieldDescriptor, value protoreflect.Value) {
switch fd.FullName() {
case "service.v1.MsgRegisterService.controller":
case "svc.v1.MsgRegisterService.controller":
x.Controller = value.Interface().(string)
case "service.v1.MsgRegisterService.service":
case "svc.v1.MsgRegisterService.service":
x.Service = value.Message().Interface().(*Service)
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: service.v1.MsgRegisterService"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: svc.v1.MsgRegisterService"))
}
panic(fmt.Errorf("message service.v1.MsgRegisterService does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message svc.v1.MsgRegisterService does not contain field %s", fd.FullName()))
}
}
@ -1065,18 +1065,18 @@ func (x *fastReflection_MsgRegisterService) Set(fd protoreflect.FieldDescriptor,
// Mutable is a mutating operation and unsafe for concurrent use.
func (x *fastReflection_MsgRegisterService) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value {
switch fd.FullName() {
case "service.v1.MsgRegisterService.service":
case "svc.v1.MsgRegisterService.service":
if x.Service == nil {
x.Service = new(Service)
}
return protoreflect.ValueOfMessage(x.Service.ProtoReflect())
case "service.v1.MsgRegisterService.controller":
panic(fmt.Errorf("field controller of message service.v1.MsgRegisterService is not mutable"))
case "svc.v1.MsgRegisterService.controller":
panic(fmt.Errorf("field controller of message svc.v1.MsgRegisterService is not mutable"))
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: service.v1.MsgRegisterService"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: svc.v1.MsgRegisterService"))
}
panic(fmt.Errorf("message service.v1.MsgRegisterService does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message svc.v1.MsgRegisterService does not contain field %s", fd.FullName()))
}
}
@ -1085,16 +1085,16 @@ func (x *fastReflection_MsgRegisterService) Mutable(fd protoreflect.FieldDescrip
// For lists, maps, and messages, this returns a new, empty, mutable value.
func (x *fastReflection_MsgRegisterService) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value {
switch fd.FullName() {
case "service.v1.MsgRegisterService.controller":
case "svc.v1.MsgRegisterService.controller":
return protoreflect.ValueOfString("")
case "service.v1.MsgRegisterService.service":
case "svc.v1.MsgRegisterService.service":
m := new(Service)
return protoreflect.ValueOfMessage(m.ProtoReflect())
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: service.v1.MsgRegisterService"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: svc.v1.MsgRegisterService"))
}
panic(fmt.Errorf("message service.v1.MsgRegisterService does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message svc.v1.MsgRegisterService does not contain field %s", fd.FullName()))
}
}
@ -1104,7 +1104,7 @@ func (x *fastReflection_MsgRegisterService) NewField(fd protoreflect.FieldDescri
func (x *fastReflection_MsgRegisterService) WhichOneof(d protoreflect.OneofDescriptor) protoreflect.FieldDescriptor {
switch d.FullName() {
default:
panic(fmt.Errorf("%s is not a oneof field in service.v1.MsgRegisterService", d.FullName()))
panic(fmt.Errorf("%s is not a oneof field in svc.v1.MsgRegisterService", d.FullName()))
}
panic("unreachable")
}
@ -1376,8 +1376,8 @@ var (
)
func init() {
file_service_v1_tx_proto_init()
md_MsgRegisterServiceResponse = File_service_v1_tx_proto.Messages().ByName("MsgRegisterServiceResponse")
file_svc_v1_tx_proto_init()
md_MsgRegisterServiceResponse = File_svc_v1_tx_proto.Messages().ByName("MsgRegisterServiceResponse")
fd_MsgRegisterServiceResponse_success = md_MsgRegisterServiceResponse.Fields().ByName("success")
fd_MsgRegisterServiceResponse_did = md_MsgRegisterServiceResponse.Fields().ByName("did")
}
@ -1391,7 +1391,7 @@ func (x *MsgRegisterServiceResponse) ProtoReflect() protoreflect.Message {
}
func (x *MsgRegisterServiceResponse) slowProtoReflect() protoreflect.Message {
mi := &file_service_v1_tx_proto_msgTypes[3]
mi := &file_svc_v1_tx_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@ -1474,15 +1474,15 @@ func (x *fastReflection_MsgRegisterServiceResponse) Range(f func(protoreflect.Fi
// a repeated field is populated if it is non-empty.
func (x *fastReflection_MsgRegisterServiceResponse) Has(fd protoreflect.FieldDescriptor) bool {
switch fd.FullName() {
case "service.v1.MsgRegisterServiceResponse.success":
case "svc.v1.MsgRegisterServiceResponse.success":
return x.Success != false
case "service.v1.MsgRegisterServiceResponse.did":
case "svc.v1.MsgRegisterServiceResponse.did":
return x.Did != ""
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: service.v1.MsgRegisterServiceResponse"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: svc.v1.MsgRegisterServiceResponse"))
}
panic(fmt.Errorf("message service.v1.MsgRegisterServiceResponse does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message svc.v1.MsgRegisterServiceResponse does not contain field %s", fd.FullName()))
}
}
@ -1494,15 +1494,15 @@ func (x *fastReflection_MsgRegisterServiceResponse) Has(fd protoreflect.FieldDes
// Clear is a mutating operation and unsafe for concurrent use.
func (x *fastReflection_MsgRegisterServiceResponse) Clear(fd protoreflect.FieldDescriptor) {
switch fd.FullName() {
case "service.v1.MsgRegisterServiceResponse.success":
case "svc.v1.MsgRegisterServiceResponse.success":
x.Success = false
case "service.v1.MsgRegisterServiceResponse.did":
case "svc.v1.MsgRegisterServiceResponse.did":
x.Did = ""
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: service.v1.MsgRegisterServiceResponse"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: svc.v1.MsgRegisterServiceResponse"))
}
panic(fmt.Errorf("message service.v1.MsgRegisterServiceResponse does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message svc.v1.MsgRegisterServiceResponse does not contain field %s", fd.FullName()))
}
}
@ -1514,17 +1514,17 @@ func (x *fastReflection_MsgRegisterServiceResponse) Clear(fd protoreflect.FieldD
// of the value; to obtain a mutable reference, use Mutable.
func (x *fastReflection_MsgRegisterServiceResponse) Get(descriptor protoreflect.FieldDescriptor) protoreflect.Value {
switch descriptor.FullName() {
case "service.v1.MsgRegisterServiceResponse.success":
case "svc.v1.MsgRegisterServiceResponse.success":
value := x.Success
return protoreflect.ValueOfBool(value)
case "service.v1.MsgRegisterServiceResponse.did":
case "svc.v1.MsgRegisterServiceResponse.did":
value := x.Did
return protoreflect.ValueOfString(value)
default:
if descriptor.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: service.v1.MsgRegisterServiceResponse"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: svc.v1.MsgRegisterServiceResponse"))
}
panic(fmt.Errorf("message service.v1.MsgRegisterServiceResponse does not contain field %s", descriptor.FullName()))
panic(fmt.Errorf("message svc.v1.MsgRegisterServiceResponse does not contain field %s", descriptor.FullName()))
}
}
@ -1540,15 +1540,15 @@ func (x *fastReflection_MsgRegisterServiceResponse) Get(descriptor protoreflect.
// Set is a mutating operation and unsafe for concurrent use.
func (x *fastReflection_MsgRegisterServiceResponse) Set(fd protoreflect.FieldDescriptor, value protoreflect.Value) {
switch fd.FullName() {
case "service.v1.MsgRegisterServiceResponse.success":
case "svc.v1.MsgRegisterServiceResponse.success":
x.Success = value.Bool()
case "service.v1.MsgRegisterServiceResponse.did":
case "svc.v1.MsgRegisterServiceResponse.did":
x.Did = value.Interface().(string)
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: service.v1.MsgRegisterServiceResponse"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: svc.v1.MsgRegisterServiceResponse"))
}
panic(fmt.Errorf("message service.v1.MsgRegisterServiceResponse does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message svc.v1.MsgRegisterServiceResponse does not contain field %s", fd.FullName()))
}
}
@ -1564,15 +1564,15 @@ func (x *fastReflection_MsgRegisterServiceResponse) Set(fd protoreflect.FieldDes
// Mutable is a mutating operation and unsafe for concurrent use.
func (x *fastReflection_MsgRegisterServiceResponse) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value {
switch fd.FullName() {
case "service.v1.MsgRegisterServiceResponse.success":
panic(fmt.Errorf("field success of message service.v1.MsgRegisterServiceResponse is not mutable"))
case "service.v1.MsgRegisterServiceResponse.did":
panic(fmt.Errorf("field did of message service.v1.MsgRegisterServiceResponse is not mutable"))
case "svc.v1.MsgRegisterServiceResponse.success":
panic(fmt.Errorf("field success of message svc.v1.MsgRegisterServiceResponse is not mutable"))
case "svc.v1.MsgRegisterServiceResponse.did":
panic(fmt.Errorf("field did of message svc.v1.MsgRegisterServiceResponse is not mutable"))
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: service.v1.MsgRegisterServiceResponse"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: svc.v1.MsgRegisterServiceResponse"))
}
panic(fmt.Errorf("message service.v1.MsgRegisterServiceResponse does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message svc.v1.MsgRegisterServiceResponse does not contain field %s", fd.FullName()))
}
}
@ -1581,15 +1581,15 @@ func (x *fastReflection_MsgRegisterServiceResponse) Mutable(fd protoreflect.Fiel
// For lists, maps, and messages, this returns a new, empty, mutable value.
func (x *fastReflection_MsgRegisterServiceResponse) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value {
switch fd.FullName() {
case "service.v1.MsgRegisterServiceResponse.success":
case "svc.v1.MsgRegisterServiceResponse.success":
return protoreflect.ValueOfBool(false)
case "service.v1.MsgRegisterServiceResponse.did":
case "svc.v1.MsgRegisterServiceResponse.did":
return protoreflect.ValueOfString("")
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: service.v1.MsgRegisterServiceResponse"))
panic(fmt.Errorf("proto3 declared messages do not support extensions: svc.v1.MsgRegisterServiceResponse"))
}
panic(fmt.Errorf("message service.v1.MsgRegisterServiceResponse does not contain field %s", fd.FullName()))
panic(fmt.Errorf("message svc.v1.MsgRegisterServiceResponse does not contain field %s", fd.FullName()))
}
}
@ -1599,7 +1599,7 @@ func (x *fastReflection_MsgRegisterServiceResponse) NewField(fd protoreflect.Fie
func (x *fastReflection_MsgRegisterServiceResponse) WhichOneof(d protoreflect.OneofDescriptor) protoreflect.FieldDescriptor {
switch d.FullName() {
default:
panic(fmt.Errorf("%s is not a oneof field in service.v1.MsgRegisterServiceResponse", d.FullName()))
panic(fmt.Errorf("%s is not a oneof field in svc.v1.MsgRegisterServiceResponse", d.FullName()))
}
panic("unreachable")
}
@ -1847,7 +1847,7 @@ func (x *fastReflection_MsgRegisterServiceResponse) ProtoMethods() *protoiface.M
// versions:
// protoc-gen-go v1.27.0
// protoc (unknown)
// source: service/v1/tx.proto
// source: svc/v1/tx.proto
const (
// Verify that this generated code is sufficiently up-to-date.
@ -1875,7 +1875,7 @@ type MsgUpdateParams struct {
func (x *MsgUpdateParams) Reset() {
*x = MsgUpdateParams{}
if protoimpl.UnsafeEnabled {
mi := &file_service_v1_tx_proto_msgTypes[0]
mi := &file_svc_v1_tx_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@ -1889,7 +1889,7 @@ func (*MsgUpdateParams) ProtoMessage() {}
// Deprecated: Use MsgUpdateParams.ProtoReflect.Descriptor instead.
func (*MsgUpdateParams) Descriptor() ([]byte, []int) {
return file_service_v1_tx_proto_rawDescGZIP(), []int{0}
return file_svc_v1_tx_proto_rawDescGZIP(), []int{0}
}
func (x *MsgUpdateParams) GetAuthority() string {
@ -1919,7 +1919,7 @@ type MsgUpdateParamsResponse struct {
func (x *MsgUpdateParamsResponse) Reset() {
*x = MsgUpdateParamsResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_service_v1_tx_proto_msgTypes[1]
mi := &file_svc_v1_tx_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@ -1933,7 +1933,7 @@ func (*MsgUpdateParamsResponse) ProtoMessage() {}
// Deprecated: Use MsgUpdateParamsResponse.ProtoReflect.Descriptor instead.
func (*MsgUpdateParamsResponse) Descriptor() ([]byte, []int) {
return file_service_v1_tx_proto_rawDescGZIP(), []int{1}
return file_svc_v1_tx_proto_rawDescGZIP(), []int{1}
}
// MsgRegisterService is the message type for the RegisterService RPC.
@ -1952,7 +1952,7 @@ type MsgRegisterService struct {
func (x *MsgRegisterService) Reset() {
*x = MsgRegisterService{}
if protoimpl.UnsafeEnabled {
mi := &file_service_v1_tx_proto_msgTypes[2]
mi := &file_svc_v1_tx_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@ -1966,7 +1966,7 @@ func (*MsgRegisterService) ProtoMessage() {}
// Deprecated: Use MsgRegisterService.ProtoReflect.Descriptor instead.
func (*MsgRegisterService) Descriptor() ([]byte, []int) {
return file_service_v1_tx_proto_rawDescGZIP(), []int{2}
return file_svc_v1_tx_proto_rawDescGZIP(), []int{2}
}
func (x *MsgRegisterService) GetController() string {
@ -1996,7 +1996,7 @@ type MsgRegisterServiceResponse struct {
func (x *MsgRegisterServiceResponse) Reset() {
*x = MsgRegisterServiceResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_service_v1_tx_proto_msgTypes[3]
mi := &file_svc_v1_tx_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@ -2010,7 +2010,7 @@ func (*MsgRegisterServiceResponse) ProtoMessage() {}
// Deprecated: Use MsgRegisterServiceResponse.ProtoReflect.Descriptor instead.
func (*MsgRegisterServiceResponse) Descriptor() ([]byte, []int) {
return file_service_v1_tx_proto_rawDescGZIP(), []int{3}
return file_svc_v1_tx_proto_rawDescGZIP(), []int{3}
}
func (x *MsgRegisterServiceResponse) GetSuccess() bool {
@ -2027,94 +2027,90 @@ func (x *MsgRegisterServiceResponse) GetDid() string {
return ""
}
var File_service_v1_tx_proto protoreflect.FileDescriptor
var File_svc_v1_tx_proto protoreflect.FileDescriptor
var file_service_v1_tx_proto_rawDesc = []byte{
0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x78, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76,
0x31, 0x1a, 0x17, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2f, 0x6d, 0x73, 0x67, 0x2f, 0x76, 0x31,
0x2f, 0x6d, 0x73, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x63, 0x6f, 0x73, 0x6d,
0x6f, 0x73, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x14, 0x67, 0x6f, 0x67, 0x6f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x2f, 0x67, 0x6f, 0x67, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x18, 0x73, 0x65, 0x72,
0x76, 0x69, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x73, 0x69, 0x73, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8b, 0x01, 0x0a, 0x0f, 0x4d, 0x73, 0x67, 0x55, 0x70, 0x64,
0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x36, 0x0a, 0x09, 0x61, 0x75, 0x74,
0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x18, 0xd2, 0xb4,
0x2d, 0x14, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73,
0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74,
0x79, 0x12, 0x30, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x12, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50,
var file_svc_v1_tx_proto_rawDesc = []byte{
0x0a, 0x0f, 0x73, 0x76, 0x63, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x12, 0x06, 0x73, 0x76, 0x63, 0x2e, 0x76, 0x31, 0x1a, 0x17, 0x63, 0x6f, 0x73, 0x6d, 0x6f,
0x73, 0x2f, 0x6d, 0x73, 0x67, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x73, 0x67, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x1a, 0x14, 0x73, 0x76, 0x63, 0x2f, 0x76, 0x31, 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x73,
0x69, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x14, 0x67, 0x6f, 0x67, 0x6f, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x67, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19,
0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x73,
0x6d, 0x6f, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x87, 0x01, 0x0a, 0x0f, 0x4d, 0x73,
0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x36, 0x0a,
0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
0x42, 0x18, 0xd2, 0xb4, 0x2d, 0x14, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x41, 0x64, 0x64,
0x72, 0x65, 0x73, 0x73, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x09, 0x61, 0x75, 0x74, 0x68,
0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x2c, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18,
0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x73, 0x76, 0x63, 0x2e, 0x76, 0x31, 0x2e, 0x50,
0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x04, 0xc8, 0xde, 0x1f, 0x00, 0x52, 0x06, 0x70, 0x61, 0x72,
0x61, 0x6d, 0x73, 0x3a, 0x0e, 0x82, 0xe7, 0xb0, 0x2a, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72,
0x69, 0x74, 0x79, 0x22, 0x19, 0x0a, 0x17, 0x4d, 0x73, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65,
0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x8e,
0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x8a,
0x01, 0x0a, 0x12, 0x4d, 0x73, 0x67, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x53, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x38, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
0x6c, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x18, 0xd2, 0xb4, 0x2d, 0x14, 0x63,
0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x53, 0x74, 0x72,
0x69, 0x6e, 0x67, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x12,
0x2d, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x13, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x3a, 0x0f,
0x82, 0xe7, 0xb0, 0x2a, 0x0a, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x22,
0x48, 0x0a, 0x1a, 0x4d, 0x73, 0x67, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x53, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a,
0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07,
0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x69, 0x64, 0x18, 0x02,
0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, 0x69, 0x64, 0x32, 0xb9, 0x01, 0x0a, 0x03, 0x4d, 0x73,
0x67, 0x12, 0x50, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d,
0x73, 0x12, 0x1b, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d,
0x73, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x23,
0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x73, 0x67, 0x55,
0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
0x6e, 0x73, 0x65, 0x12, 0x59, 0x0a, 0x0f, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x53,
0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x1e, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
0x29, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x0f, 0x2e, 0x73, 0x76, 0x63, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
0x65, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x3a, 0x0f, 0x82, 0xe7, 0xb0, 0x2a,
0x0a, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x22, 0x48, 0x0a, 0x1a, 0x4d,
0x73, 0x67, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63,
0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63,
0x65, 0x73, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
0x52, 0x03, 0x64, 0x69, 0x64, 0x32, 0xa9, 0x01, 0x0a, 0x03, 0x4d, 0x73, 0x67, 0x12, 0x48, 0x0a,
0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x17, 0x2e,
0x73, 0x76, 0x63, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x73, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65,
0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x1f, 0x2e, 0x73, 0x76, 0x63, 0x2e, 0x76, 0x31, 0x2e,
0x4d, 0x73, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52,
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0f, 0x52, 0x65, 0x67, 0x69, 0x73,
0x74, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x1a, 0x2e, 0x73, 0x76, 0x63,
0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x73, 0x67, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x53,
0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x1a, 0x26, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x73, 0x67, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x53,
0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x1a, 0x05,
0x80, 0xe7, 0xb0, 0x2a, 0x01, 0x42, 0x93, 0x01, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x73, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x07, 0x54, 0x78, 0x50, 0x72, 0x6f, 0x74,
0x6f, 0x50, 0x01, 0x5a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
0x6f, 0x6e, 0x73, 0x6f, 0x6e, 0x72, 0x2f, 0x73, 0x6f, 0x6e, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f,
0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x73, 0x65, 0x72, 0x76, 0x69,
0x63, 0x65, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x53, 0x58, 0x58, 0xaa, 0x02, 0x0a, 0x53, 0x65, 0x72,
0x76, 0x69, 0x63, 0x65, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
0x65, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5c, 0x56,
0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0b,
0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x33,
0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x1a, 0x22, 0x2e, 0x73, 0x76, 0x63, 0x2e, 0x76, 0x31, 0x2e,
0x4d, 0x73, 0x67, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69,
0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x1a, 0x05, 0x80, 0xe7, 0xb0, 0x2a,
0x01, 0x42, 0x77, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x2e, 0x73, 0x76, 0x63, 0x2e, 0x76, 0x31, 0x42,
0x07, 0x54, 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68,
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6f, 0x6e, 0x73, 0x6f, 0x6e, 0x72, 0x2f, 0x73, 0x6f,
0x6e, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x76, 0x63, 0x2f, 0x76, 0x31, 0x3b, 0x73, 0x76,
0x63, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x53, 0x58, 0x58, 0xaa, 0x02, 0x06, 0x53, 0x76, 0x63, 0x2e,
0x56, 0x31, 0xca, 0x02, 0x06, 0x53, 0x76, 0x63, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x12, 0x53, 0x76,
0x63, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
0xea, 0x02, 0x07, 0x53, 0x76, 0x63, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x33,
}
var (
file_service_v1_tx_proto_rawDescOnce sync.Once
file_service_v1_tx_proto_rawDescData = file_service_v1_tx_proto_rawDesc
file_svc_v1_tx_proto_rawDescOnce sync.Once
file_svc_v1_tx_proto_rawDescData = file_svc_v1_tx_proto_rawDesc
)
func file_service_v1_tx_proto_rawDescGZIP() []byte {
file_service_v1_tx_proto_rawDescOnce.Do(func() {
file_service_v1_tx_proto_rawDescData = protoimpl.X.CompressGZIP(file_service_v1_tx_proto_rawDescData)
func file_svc_v1_tx_proto_rawDescGZIP() []byte {
file_svc_v1_tx_proto_rawDescOnce.Do(func() {
file_svc_v1_tx_proto_rawDescData = protoimpl.X.CompressGZIP(file_svc_v1_tx_proto_rawDescData)
})
return file_service_v1_tx_proto_rawDescData
return file_svc_v1_tx_proto_rawDescData
}
var file_service_v1_tx_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
var file_service_v1_tx_proto_goTypes = []interface{}{
(*MsgUpdateParams)(nil), // 0: service.v1.MsgUpdateParams
(*MsgUpdateParamsResponse)(nil), // 1: service.v1.MsgUpdateParamsResponse
(*MsgRegisterService)(nil), // 2: service.v1.MsgRegisterService
(*MsgRegisterServiceResponse)(nil), // 3: service.v1.MsgRegisterServiceResponse
(*Params)(nil), // 4: service.v1.Params
(*Service)(nil), // 5: service.v1.Service
var file_svc_v1_tx_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
var file_svc_v1_tx_proto_goTypes = []interface{}{
(*MsgUpdateParams)(nil), // 0: svc.v1.MsgUpdateParams
(*MsgUpdateParamsResponse)(nil), // 1: svc.v1.MsgUpdateParamsResponse
(*MsgRegisterService)(nil), // 2: svc.v1.MsgRegisterService
(*MsgRegisterServiceResponse)(nil), // 3: svc.v1.MsgRegisterServiceResponse
(*Params)(nil), // 4: svc.v1.Params
(*Service)(nil), // 5: svc.v1.Service
}
var file_service_v1_tx_proto_depIdxs = []int32{
4, // 0: service.v1.MsgUpdateParams.params:type_name -> service.v1.Params
5, // 1: service.v1.MsgRegisterService.service:type_name -> service.v1.Service
0, // 2: service.v1.Msg.UpdateParams:input_type -> service.v1.MsgUpdateParams
2, // 3: service.v1.Msg.RegisterService:input_type -> service.v1.MsgRegisterService
1, // 4: service.v1.Msg.UpdateParams:output_type -> service.v1.MsgUpdateParamsResponse
3, // 5: service.v1.Msg.RegisterService:output_type -> service.v1.MsgRegisterServiceResponse
var file_svc_v1_tx_proto_depIdxs = []int32{
4, // 0: svc.v1.MsgUpdateParams.params:type_name -> svc.v1.Params
5, // 1: svc.v1.MsgRegisterService.service:type_name -> svc.v1.Service
0, // 2: svc.v1.Msg.UpdateParams:input_type -> svc.v1.MsgUpdateParams
2, // 3: svc.v1.Msg.RegisterService:input_type -> svc.v1.MsgRegisterService
1, // 4: svc.v1.Msg.UpdateParams:output_type -> svc.v1.MsgUpdateParamsResponse
3, // 5: svc.v1.Msg.RegisterService:output_type -> svc.v1.MsgRegisterServiceResponse
4, // [4:6] is the sub-list for method output_type
2, // [2:4] is the sub-list for method input_type
2, // [2:2] is the sub-list for extension type_name
@ -2122,14 +2118,14 @@ var file_service_v1_tx_proto_depIdxs = []int32{
0, // [0:2] is the sub-list for field type_name
}
func init() { file_service_v1_tx_proto_init() }
func file_service_v1_tx_proto_init() {
if File_service_v1_tx_proto != nil {
func init() { file_svc_v1_tx_proto_init() }
func file_svc_v1_tx_proto_init() {
if File_svc_v1_tx_proto != nil {
return
}
file_service_v1_genesis_proto_init()
file_svc_v1_genesis_proto_init()
if !protoimpl.UnsafeEnabled {
file_service_v1_tx_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
file_svc_v1_tx_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*MsgUpdateParams); i {
case 0:
return &v.state
@ -2141,7 +2137,7 @@ func file_service_v1_tx_proto_init() {
return nil
}
}
file_service_v1_tx_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
file_svc_v1_tx_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*MsgUpdateParamsResponse); i {
case 0:
return &v.state
@ -2153,7 +2149,7 @@ func file_service_v1_tx_proto_init() {
return nil
}
}
file_service_v1_tx_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
file_svc_v1_tx_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*MsgRegisterService); i {
case 0:
return &v.state
@ -2165,7 +2161,7 @@ func file_service_v1_tx_proto_init() {
return nil
}
}
file_service_v1_tx_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
file_svc_v1_tx_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*MsgRegisterServiceResponse); i {
case 0:
return &v.state
@ -2182,18 +2178,18 @@ func file_service_v1_tx_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_service_v1_tx_proto_rawDesc,
RawDescriptor: file_svc_v1_tx_proto_rawDesc,
NumEnums: 0,
NumMessages: 4,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_service_v1_tx_proto_goTypes,
DependencyIndexes: file_service_v1_tx_proto_depIdxs,
MessageInfos: file_service_v1_tx_proto_msgTypes,
GoTypes: file_svc_v1_tx_proto_goTypes,
DependencyIndexes: file_svc_v1_tx_proto_depIdxs,
MessageInfos: file_svc_v1_tx_proto_msgTypes,
}.Build()
File_service_v1_tx_proto = out.File
file_service_v1_tx_proto_rawDesc = nil
file_service_v1_tx_proto_goTypes = nil
file_service_v1_tx_proto_depIdxs = nil
File_svc_v1_tx_proto = out.File
file_svc_v1_tx_proto_rawDesc = nil
file_svc_v1_tx_proto_goTypes = nil
file_svc_v1_tx_proto_depIdxs = nil
}
View File
@ -2,9 +2,9 @@
// versions:
// - protoc-gen-go-grpc v1.5.1
// - protoc (unknown)
// source: service/v1/tx.proto
// source: svc/v1/tx.proto
package servicev1
package svcv1
import (
context "context"
@ -19,8 +19,8 @@ import (
const _ = grpc.SupportPackageIsVersion9
const (
Msg_UpdateParams_FullMethodName = "/service.v1.Msg/UpdateParams"
Msg_RegisterService_FullMethodName = "/service.v1.Msg/RegisterService"
Msg_UpdateParams_FullMethodName = "/svc.v1.Msg/UpdateParams"
Msg_RegisterService_FullMethodName = "/svc.v1.Msg/RegisterService"
)
// MsgClient is the client API for Msg service.
@ -156,7 +156,7 @@ func _Msg_RegisterService_Handler(srv interface{}, ctx context.Context, dec func
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var Msg_ServiceDesc = grpc.ServiceDesc{
ServiceName: "service.v1.Msg",
ServiceName: "svc.v1.Msg",
HandlerType: (*MsgServer)(nil),
Methods: []grpc.MethodDesc{
{
@ -169,5 +169,5 @@ var Msg_ServiceDesc = grpc.ServiceDesc{
},
},
Streams: []grpc.StreamDesc{},
Metadata: "service/v1/tx.proto",
Metadata: "svc/v1/tx.proto",
}
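The Msg_ServiceDesc above is what plain gRPC consumes when the service is wired up. A minimal server-side sketch, assuming the standard helpers protoc-gen-go-grpc emits alongside this file (RegisterMsgServer, UnimplementedMsgServer) and the import path from the new go_package, github.com/onsonr/sonr/api/svc/v1; on a real chain the handler is the module keeper registered through RegisterServices, not a stub, and the DID value here is hypothetical.

```go
package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"

	svcv1 "github.com/onsonr/sonr/api/svc/v1" // go_package from the descriptor above
)

// stubMsgServer satisfies MsgServer; embedding the generated
// UnimplementedMsgServer supplies defaults for the remaining RPCs.
type stubMsgServer struct {
	svcv1.UnimplementedMsgServer
}

func (stubMsgServer) RegisterService(ctx context.Context, in *svcv1.MsgRegisterService) (*svcv1.MsgRegisterServiceResponse, error) {
	// Hypothetical response; a real keeper would validate the controller
	// address and persist the Service record before answering.
	return &svcv1.MsgRegisterServiceResponse{Success: true, Did: "did:sonr:example"}, nil
}

func main() {
	lis, err := net.Listen("tcp", "localhost:9090")
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	svcv1.RegisterMsgServer(s, stubMsgServer{}) // registers under "svc.v1.Msg"
	log.Fatal(s.Serve(lis))
}
```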
File diff suppressed because it is too large

View File
@ -1,235 +0,0 @@
// Code generated by protoc-gen-go-cosmos-orm. DO NOT EDIT.
package vaultv1
import (
context "context"
ormlist "cosmossdk.io/orm/model/ormlist"
ormtable "cosmossdk.io/orm/model/ormtable"
ormerrors "cosmossdk.io/orm/types/ormerrors"
)
type DWNTable interface {
Insert(ctx context.Context, dWN *DWN) error
InsertReturningId(ctx context.Context, dWN *DWN) (uint64, error)
LastInsertedSequence(ctx context.Context) (uint64, error)
Update(ctx context.Context, dWN *DWN) error
Save(ctx context.Context, dWN *DWN) error
Delete(ctx context.Context, dWN *DWN) error
Has(ctx context.Context, id uint64) (found bool, err error)
// Get returns nil and an error which responds true to ormerrors.IsNotFound() if the record was not found.
Get(ctx context.Context, id uint64) (*DWN, error)
HasByAlias(ctx context.Context, alias string) (found bool, err error)
// GetByAlias returns nil and an error which responds true to ormerrors.IsNotFound() if the record was not found.
GetByAlias(ctx context.Context, alias string) (*DWN, error)
HasByCid(ctx context.Context, cid string) (found bool, err error)
// GetByCid returns nil and an error which responds true to ormerrors.IsNotFound() if the record was not found.
GetByCid(ctx context.Context, cid string) (*DWN, error)
List(ctx context.Context, prefixKey DWNIndexKey, opts ...ormlist.Option) (DWNIterator, error)
ListRange(ctx context.Context, from, to DWNIndexKey, opts ...ormlist.Option) (DWNIterator, error)
DeleteBy(ctx context.Context, prefixKey DWNIndexKey) error
DeleteRange(ctx context.Context, from, to DWNIndexKey) error
doNotImplement()
}
type DWNIterator struct {
ormtable.Iterator
}
func (i DWNIterator) Value() (*DWN, error) {
var dWN DWN
err := i.UnmarshalMessage(&dWN)
return &dWN, err
}
type DWNIndexKey interface {
id() uint32
values() []interface{}
dWNIndexKey()
}
// primary key starting index.
type DWNPrimaryKey = DWNIdIndexKey
type DWNIdIndexKey struct {
vs []interface{}
}
func (x DWNIdIndexKey) id() uint32 { return 0 }
func (x DWNIdIndexKey) values() []interface{} { return x.vs }
func (x DWNIdIndexKey) dWNIndexKey() {}
func (this DWNIdIndexKey) WithId(id uint64) DWNIdIndexKey {
this.vs = []interface{}{id}
return this
}
type DWNAliasIndexKey struct {
vs []interface{}
}
func (x DWNAliasIndexKey) id() uint32 { return 1 }
func (x DWNAliasIndexKey) values() []interface{} { return x.vs }
func (x DWNAliasIndexKey) dWNIndexKey() {}
func (this DWNAliasIndexKey) WithAlias(alias string) DWNAliasIndexKey {
this.vs = []interface{}{alias}
return this
}
type DWNCidIndexKey struct {
vs []interface{}
}
func (x DWNCidIndexKey) id() uint32 { return 2 }
func (x DWNCidIndexKey) values() []interface{} { return x.vs }
func (x DWNCidIndexKey) dWNIndexKey() {}
func (this DWNCidIndexKey) WithCid(cid string) DWNCidIndexKey {
this.vs = []interface{}{cid}
return this
}
type dWNTable struct {
table ormtable.AutoIncrementTable
}
func (this dWNTable) Insert(ctx context.Context, dWN *DWN) error {
return this.table.Insert(ctx, dWN)
}
func (this dWNTable) Update(ctx context.Context, dWN *DWN) error {
return this.table.Update(ctx, dWN)
}
func (this dWNTable) Save(ctx context.Context, dWN *DWN) error {
return this.table.Save(ctx, dWN)
}
func (this dWNTable) Delete(ctx context.Context, dWN *DWN) error {
return this.table.Delete(ctx, dWN)
}
func (this dWNTable) InsertReturningId(ctx context.Context, dWN *DWN) (uint64, error) {
return this.table.InsertReturningPKey(ctx, dWN)
}
func (this dWNTable) LastInsertedSequence(ctx context.Context) (uint64, error) {
return this.table.LastInsertedSequence(ctx)
}
func (this dWNTable) Has(ctx context.Context, id uint64) (found bool, err error) {
return this.table.PrimaryKey().Has(ctx, id)
}
func (this dWNTable) Get(ctx context.Context, id uint64) (*DWN, error) {
var dWN DWN
found, err := this.table.PrimaryKey().Get(ctx, &dWN, id)
if err != nil {
return nil, err
}
if !found {
return nil, ormerrors.NotFound
}
return &dWN, nil
}
func (this dWNTable) HasByAlias(ctx context.Context, alias string) (found bool, err error) {
return this.table.GetIndexByID(1).(ormtable.UniqueIndex).Has(ctx,
alias,
)
}
func (this dWNTable) GetByAlias(ctx context.Context, alias string) (*DWN, error) {
var dWN DWN
found, err := this.table.GetIndexByID(1).(ormtable.UniqueIndex).Get(ctx, &dWN,
alias,
)
if err != nil {
return nil, err
}
if !found {
return nil, ormerrors.NotFound
}
return &dWN, nil
}
func (this dWNTable) HasByCid(ctx context.Context, cid string) (found bool, err error) {
return this.table.GetIndexByID(2).(ormtable.UniqueIndex).Has(ctx,
cid,
)
}
func (this dWNTable) GetByCid(ctx context.Context, cid string) (*DWN, error) {
var dWN DWN
found, err := this.table.GetIndexByID(2).(ormtable.UniqueIndex).Get(ctx, &dWN,
cid,
)
if err != nil {
return nil, err
}
if !found {
return nil, ormerrors.NotFound
}
return &dWN, nil
}
func (this dWNTable) List(ctx context.Context, prefixKey DWNIndexKey, opts ...ormlist.Option) (DWNIterator, error) {
it, err := this.table.GetIndexByID(prefixKey.id()).List(ctx, prefixKey.values(), opts...)
return DWNIterator{it}, err
}
func (this dWNTable) ListRange(ctx context.Context, from, to DWNIndexKey, opts ...ormlist.Option) (DWNIterator, error) {
it, err := this.table.GetIndexByID(from.id()).ListRange(ctx, from.values(), to.values(), opts...)
return DWNIterator{it}, err
}
func (this dWNTable) DeleteBy(ctx context.Context, prefixKey DWNIndexKey) error {
return this.table.GetIndexByID(prefixKey.id()).DeleteBy(ctx, prefixKey.values()...)
}
func (this dWNTable) DeleteRange(ctx context.Context, from, to DWNIndexKey) error {
return this.table.GetIndexByID(from.id()).DeleteRange(ctx, from.values(), to.values())
}
func (this dWNTable) doNotImplement() {}
var _ DWNTable = dWNTable{}
func NewDWNTable(db ormtable.Schema) (DWNTable, error) {
table := db.GetTable(&DWN{})
if table == nil {
return nil, ormerrors.TableNotFound.Wrap(string((&DWN{}).ProtoReflect().Descriptor().FullName()))
}
return dWNTable{table.(ormtable.AutoIncrementTable)}, nil
}
type StateStore interface {
DWNTable() DWNTable
doNotImplement()
}
type stateStore struct {
dWN DWNTable
}
func (x stateStore) DWNTable() DWNTable {
return x.dWN
}
func (stateStore) doNotImplement() {}
var _ StateStore = stateStore{}
func NewStateStore(db ormtable.Schema) (StateStore, error) {
dWNTable, err := NewDWNTable(db)
if err != nil {
return nil, err
}
return stateStore{
dWNTable,
}, nil
}
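Before deletion, the table accessors above composed as follows. A minimal sketch against that API, assuming an ormtable.Schema built from the module's ORM descriptor and the go_package path github.com/onsonr/sonr/api/vault/v1 carried in the state.proto descriptor further down; the alias value is hypothetical.

```go
import (
	"context"

	ormerrors "cosmossdk.io/orm/types/ormerrors"

	vaultv1 "github.com/onsonr/sonr/api/vault/v1"
)

// lookupDWN resolves a DWN record through the unique alias index (index id 1).
func lookupDWN(ctx context.Context, store vaultv1.StateStore, alias string) (*vaultv1.DWN, error) {
	dwn, err := store.DWNTable().GetByAlias(ctx, alias)
	if ormerrors.IsNotFound(err) {
		return nil, nil // nothing registered under this alias
	}
	if err != nil {
		return nil, err
	}
	return dwn, nil
}
```

Prefix scans follow the same pattern through the typed index keys, e.g. store.DWNTable().List(ctx, vaultv1.DWNIdIndexKey{}) to walk the table in primary-key order.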
View File
@ -1,772 +0,0 @@
// Code generated by protoc-gen-go-pulsar. DO NOT EDIT.
package vaultv1
import (
_ "cosmossdk.io/api/cosmos/orm/v1"
fmt "fmt"
runtime "github.com/cosmos/cosmos-proto/runtime"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoiface "google.golang.org/protobuf/runtime/protoiface"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
io "io"
reflect "reflect"
sync "sync"
)
var (
md_DWN protoreflect.MessageDescriptor
fd_DWN_id protoreflect.FieldDescriptor
fd_DWN_alias protoreflect.FieldDescriptor
fd_DWN_cid protoreflect.FieldDescriptor
fd_DWN_resolver protoreflect.FieldDescriptor
)
func init() {
file_vault_v1_state_proto_init()
md_DWN = File_vault_v1_state_proto.Messages().ByName("DWN")
fd_DWN_id = md_DWN.Fields().ByName("id")
fd_DWN_alias = md_DWN.Fields().ByName("alias")
fd_DWN_cid = md_DWN.Fields().ByName("cid")
fd_DWN_resolver = md_DWN.Fields().ByName("resolver")
}
var _ protoreflect.Message = (*fastReflection_DWN)(nil)
type fastReflection_DWN DWN
func (x *DWN) ProtoReflect() protoreflect.Message {
return (*fastReflection_DWN)(x)
}
func (x *DWN) slowProtoReflect() protoreflect.Message {
mi := &file_vault_v1_state_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
var _fastReflection_DWN_messageType fastReflection_DWN_messageType
var _ protoreflect.MessageType = fastReflection_DWN_messageType{}
type fastReflection_DWN_messageType struct{}
func (x fastReflection_DWN_messageType) Zero() protoreflect.Message {
return (*fastReflection_DWN)(nil)
}
func (x fastReflection_DWN_messageType) New() protoreflect.Message {
return new(fastReflection_DWN)
}
func (x fastReflection_DWN_messageType) Descriptor() protoreflect.MessageDescriptor {
return md_DWN
}
// Descriptor returns message descriptor, which contains only the protobuf
// type information for the message.
func (x *fastReflection_DWN) Descriptor() protoreflect.MessageDescriptor {
return md_DWN
}
// Type returns the message type, which encapsulates both Go and protobuf
// type information. If the Go type information is not needed,
// it is recommended that the message descriptor be used instead.
func (x *fastReflection_DWN) Type() protoreflect.MessageType {
return _fastReflection_DWN_messageType
}
// New returns a newly allocated and mutable empty message.
func (x *fastReflection_DWN) New() protoreflect.Message {
return new(fastReflection_DWN)
}
// Interface unwraps the message reflection interface and
// returns the underlying ProtoMessage interface.
func (x *fastReflection_DWN) Interface() protoreflect.ProtoMessage {
return (*DWN)(x)
}
// Range iterates over every populated field in an undefined order,
// calling f for each field descriptor and value encountered.
// Range returns immediately if f returns false.
// While iterating, mutating operations may only be performed
// on the current field descriptor.
func (x *fastReflection_DWN) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
if x.Id != uint64(0) {
value := protoreflect.ValueOfUint64(x.Id)
if !f(fd_DWN_id, value) {
return
}
}
if x.Alias != "" {
value := protoreflect.ValueOfString(x.Alias)
if !f(fd_DWN_alias, value) {
return
}
}
if x.Cid != "" {
value := protoreflect.ValueOfString(x.Cid)
if !f(fd_DWN_cid, value) {
return
}
}
if x.Resolver != "" {
value := protoreflect.ValueOfString(x.Resolver)
if !f(fd_DWN_resolver, value) {
return
}
}
}
// Has reports whether a field is populated.
//
// Some fields have the property of nullability where it is possible to
// distinguish between the default value of a field and whether the field
// was explicitly populated with the default value. Singular message fields,
// member fields of a oneof, and proto2 scalar fields are nullable. Such
// fields are populated only if explicitly set.
//
// In other cases (aside from the nullable cases above),
// a proto3 scalar field is populated if it contains a non-zero value, and
// a repeated field is populated if it is non-empty.
func (x *fastReflection_DWN) Has(fd protoreflect.FieldDescriptor) bool {
switch fd.FullName() {
case "vault.v1.DWN.id":
return x.Id != uint64(0)
case "vault.v1.DWN.alias":
return x.Alias != ""
case "vault.v1.DWN.cid":
return x.Cid != ""
case "vault.v1.DWN.resolver":
return x.Resolver != ""
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: vault.v1.DWN"))
}
panic(fmt.Errorf("message vault.v1.DWN does not contain field %s", fd.FullName()))
}
}
// Clear clears the field such that a subsequent Has call reports false.
//
// Clearing an extension field clears both the extension type and value
// associated with the given field number.
//
// Clear is a mutating operation and unsafe for concurrent use.
func (x *fastReflection_DWN) Clear(fd protoreflect.FieldDescriptor) {
switch fd.FullName() {
case "vault.v1.DWN.id":
x.Id = uint64(0)
case "vault.v1.DWN.alias":
x.Alias = ""
case "vault.v1.DWN.cid":
x.Cid = ""
case "vault.v1.DWN.resolver":
x.Resolver = ""
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: vault.v1.DWN"))
}
panic(fmt.Errorf("message vault.v1.DWN does not contain field %s", fd.FullName()))
}
}
// Get retrieves the value for a field.
//
// For unpopulated scalars, it returns the default value, where
// the default value of a bytes scalar is guaranteed to be a copy.
// For unpopulated composite types, it returns an empty, read-only view
// of the value; to obtain a mutable reference, use Mutable.
func (x *fastReflection_DWN) Get(descriptor protoreflect.FieldDescriptor) protoreflect.Value {
switch descriptor.FullName() {
case "vault.v1.DWN.id":
value := x.Id
return protoreflect.ValueOfUint64(value)
case "vault.v1.DWN.alias":
value := x.Alias
return protoreflect.ValueOfString(value)
case "vault.v1.DWN.cid":
value := x.Cid
return protoreflect.ValueOfString(value)
case "vault.v1.DWN.resolver":
value := x.Resolver
return protoreflect.ValueOfString(value)
default:
if descriptor.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: vault.v1.DWN"))
}
panic(fmt.Errorf("message vault.v1.DWN does not contain field %s", descriptor.FullName()))
}
}
// Set stores the value for a field.
//
// For a field belonging to a oneof, it implicitly clears any other field
// that may be currently set within the same oneof.
// For extension fields, it implicitly stores the provided ExtensionType.
// When setting a composite type, it is unspecified whether the stored value
// aliases the source's memory in any way. If the composite value is an
// empty, read-only value, then it panics.
//
// Set is a mutating operation and unsafe for concurrent use.
func (x *fastReflection_DWN) Set(fd protoreflect.FieldDescriptor, value protoreflect.Value) {
switch fd.FullName() {
case "vault.v1.DWN.id":
x.Id = value.Uint()
case "vault.v1.DWN.alias":
x.Alias = value.Interface().(string)
case "vault.v1.DWN.cid":
x.Cid = value.Interface().(string)
case "vault.v1.DWN.resolver":
x.Resolver = value.Interface().(string)
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: vault.v1.DWN"))
}
panic(fmt.Errorf("message vault.v1.DWN does not contain field %s", fd.FullName()))
}
}
// Mutable returns a mutable reference to a composite type.
//
// If the field is unpopulated, it may allocate a composite value.
// For a field belonging to a oneof, it implicitly clears any other field
// that may be currently set within the same oneof.
// For extension fields, it implicitly stores the provided ExtensionType
// if not already stored.
// It panics if the field does not contain a composite type.
//
// Mutable is a mutating operation and unsafe for concurrent use.
func (x *fastReflection_DWN) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value {
switch fd.FullName() {
case "vault.v1.DWN.id":
panic(fmt.Errorf("field id of message vault.v1.DWN is not mutable"))
case "vault.v1.DWN.alias":
panic(fmt.Errorf("field alias of message vault.v1.DWN is not mutable"))
case "vault.v1.DWN.cid":
panic(fmt.Errorf("field cid of message vault.v1.DWN is not mutable"))
case "vault.v1.DWN.resolver":
panic(fmt.Errorf("field resolver of message vault.v1.DWN is not mutable"))
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: vault.v1.DWN"))
}
panic(fmt.Errorf("message vault.v1.DWN does not contain field %s", fd.FullName()))
}
}
// NewField returns a new value that is assignable to the field
// for the given descriptor. For scalars, this returns the default value.
// For lists, maps, and messages, this returns a new, empty, mutable value.
func (x *fastReflection_DWN) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value {
switch fd.FullName() {
case "vault.v1.DWN.id":
return protoreflect.ValueOfUint64(uint64(0))
case "vault.v1.DWN.alias":
return protoreflect.ValueOfString("")
case "vault.v1.DWN.cid":
return protoreflect.ValueOfString("")
case "vault.v1.DWN.resolver":
return protoreflect.ValueOfString("")
default:
if fd.IsExtension() {
panic(fmt.Errorf("proto3 declared messages do not support extensions: vault.v1.DWN"))
}
panic(fmt.Errorf("message vault.v1.DWN does not contain field %s", fd.FullName()))
}
}
// WhichOneof reports which field within the oneof is populated,
// returning nil if none are populated.
// It panics if the oneof descriptor does not belong to this message.
func (x *fastReflection_DWN) WhichOneof(d protoreflect.OneofDescriptor) protoreflect.FieldDescriptor {
switch d.FullName() {
default:
panic(fmt.Errorf("%s is not a oneof field in vault.v1.DWN", d.FullName()))
}
panic("unreachable")
}
// GetUnknown retrieves the entire list of unknown fields.
// The caller may only mutate the contents of the RawFields
// if the mutated bytes are stored back into the message with SetUnknown.
func (x *fastReflection_DWN) GetUnknown() protoreflect.RawFields {
return x.unknownFields
}
// SetUnknown stores an entire list of unknown fields.
// The raw fields must be syntactically valid according to the wire format.
// An implementation may panic if this is not the case.
// Once stored, the caller must not mutate the content of the RawFields.
// An empty RawFields may be passed to clear the fields.
//
// SetUnknown is a mutating operation and unsafe for concurrent use.
func (x *fastReflection_DWN) SetUnknown(fields protoreflect.RawFields) {
x.unknownFields = fields
}
// IsValid reports whether the message is valid.
//
// An invalid message is an empty, read-only value.
//
// An invalid message often corresponds to a nil pointer of the concrete
// message type, but the details are implementation dependent.
// Validity is not part of the protobuf data model, and may not
// be preserved in marshaling or other operations.
func (x *fastReflection_DWN) IsValid() bool {
return x != nil
}
// ProtoMethods returns optional fastReflectionFeature-path implementations of various operations.
// This method may return nil.
//
// The returned methods type is identical to
// "google.golang.org/protobuf/runtime/protoiface".Methods.
// Consult the protoiface package documentation for details.
func (x *fastReflection_DWN) ProtoMethods() *protoiface.Methods {
size := func(input protoiface.SizeInput) protoiface.SizeOutput {
x := input.Message.Interface().(*DWN)
if x == nil {
return protoiface.SizeOutput{
NoUnkeyedLiterals: input.NoUnkeyedLiterals,
Size: 0,
}
}
options := runtime.SizeInputToOptions(input)
_ = options
var n int
var l int
_ = l
if x.Id != 0 {
n += 1 + runtime.Sov(uint64(x.Id))
}
l = len(x.Alias)
if l > 0 {
n += 1 + l + runtime.Sov(uint64(l))
}
l = len(x.Cid)
if l > 0 {
n += 1 + l + runtime.Sov(uint64(l))
}
l = len(x.Resolver)
if l > 0 {
n += 1 + l + runtime.Sov(uint64(l))
}
if x.unknownFields != nil {
n += len(x.unknownFields)
}
return protoiface.SizeOutput{
NoUnkeyedLiterals: input.NoUnkeyedLiterals,
Size: n,
}
}
marshal := func(input protoiface.MarshalInput) (protoiface.MarshalOutput, error) {
x := input.Message.Interface().(*DWN)
if x == nil {
return protoiface.MarshalOutput{
NoUnkeyedLiterals: input.NoUnkeyedLiterals,
Buf: input.Buf,
}, nil
}
options := runtime.MarshalInputToOptions(input)
_ = options
size := options.Size(x)
dAtA := make([]byte, size)
i := len(dAtA)
_ = i
var l int
_ = l
if x.unknownFields != nil {
i -= len(x.unknownFields)
copy(dAtA[i:], x.unknownFields)
}
if len(x.Resolver) > 0 {
i -= len(x.Resolver)
copy(dAtA[i:], x.Resolver)
i = runtime.EncodeVarint(dAtA, i, uint64(len(x.Resolver)))
i--
dAtA[i] = 0x22
}
if len(x.Cid) > 0 {
i -= len(x.Cid)
copy(dAtA[i:], x.Cid)
i = runtime.EncodeVarint(dAtA, i, uint64(len(x.Cid)))
i--
dAtA[i] = 0x1a
}
if len(x.Alias) > 0 {
i -= len(x.Alias)
copy(dAtA[i:], x.Alias)
i = runtime.EncodeVarint(dAtA, i, uint64(len(x.Alias)))
i--
dAtA[i] = 0x12
}
if x.Id != 0 {
i = runtime.EncodeVarint(dAtA, i, uint64(x.Id))
i--
dAtA[i] = 0x8
}
if input.Buf != nil {
input.Buf = append(input.Buf, dAtA...)
} else {
input.Buf = dAtA
}
return protoiface.MarshalOutput{
NoUnkeyedLiterals: input.NoUnkeyedLiterals,
Buf: input.Buf,
}, nil
}
unmarshal := func(input protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) {
x := input.Message.Interface().(*DWN)
if x == nil {
return protoiface.UnmarshalOutput{
NoUnkeyedLiterals: input.NoUnkeyedLiterals,
Flags: input.Flags,
}, nil
}
options := runtime.UnmarshalInputToOptions(input)
_ = options
dAtA := input.Buf
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow
}
if iNdEx >= l {
return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: DWN: wiretype end group for non-group")
}
if fieldNum <= 0 {
return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: DWN: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field Id", wireType)
}
x.Id = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow
}
if iNdEx >= l {
return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
x.Id |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 2 {
return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field Alias", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow
}
if iNdEx >= l {
return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength
}
if postIndex > l {
return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF
}
x.Alias = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 2 {
return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field Cid", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow
}
if iNdEx >= l {
return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength
}
if postIndex > l {
return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF
}
x.Cid = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 4:
if wireType != 2 {
return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field Resolver", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow
}
if iNdEx >= l {
return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength
}
if postIndex > l {
return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF
}
x.Resolver = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := runtime.Skip(dAtA[iNdEx:])
if err != nil {
return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength
}
if (iNdEx + skippy) > l {
return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF
}
if !options.DiscardUnknown {
x.unknownFields = append(x.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
}
iNdEx += skippy
}
}
if iNdEx > l {
return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF
}
return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, nil
}
return &protoiface.Methods{
NoUnkeyedLiterals: struct{}{},
Flags: protoiface.SupportMarshalDeterministic | protoiface.SupportUnmarshalDiscardUnknown,
Size: size,
Marshal: marshal,
Unmarshal: unmarshal,
Merge: nil,
CheckInitialized: nil,
}
}
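The size/marshal/unmarshal closures above hand-roll the protobuf wire format: every field is prefixed with a varint tag (field number shifted left three bits, ORed with the wire type), and runtime.Sov counts how many bytes a varint occupies at seven payload bits per byte with the high bit as a continuation flag. A self-contained sketch of that arithmetic; this sov mirrors what the generated code delegates to github.com/cosmos/cosmos-proto/runtime:

```go
package main

import "fmt"

// sov returns the number of bytes needed to varint-encode x:
// 7 payload bits per byte, high bit set on every byte but the last.
func sov(x uint64) int {
	n := 1
	for x >= 0x80 {
		x >>= 7
		n++
	}
	return n
}

func main() {
	// Field 4 ("resolver"), wire type 2 (length-delimited) yields tag byte
	// 0x22 -- exactly the constant the marshal closure above writes.
	tag := uint64(4)<<3 | 2
	fmt.Printf("tag=%#x sov(300)=%d\n", tag, sov(300)) // tag=0x22 sov(300)=2
}
```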
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.27.0
// protoc (unknown)
// source: vault/v1/state.proto
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type DWN struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
Alias string `protobuf:"bytes,2,opt,name=alias,proto3" json:"alias,omitempty"`
Cid string `protobuf:"bytes,3,opt,name=cid,proto3" json:"cid,omitempty"`
Resolver string `protobuf:"bytes,4,opt,name=resolver,proto3" json:"resolver,omitempty"`
}
func (x *DWN) Reset() {
*x = DWN{}
if protoimpl.UnsafeEnabled {
mi := &file_vault_v1_state_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *DWN) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DWN) ProtoMessage() {}
// Deprecated: Use DWN.ProtoReflect.Descriptor instead.
func (*DWN) Descriptor() ([]byte, []int) {
return file_vault_v1_state_proto_rawDescGZIP(), []int{0}
}
func (x *DWN) GetId() uint64 {
if x != nil {
return x.Id
}
return 0
}
func (x *DWN) GetAlias() string {
if x != nil {
return x.Alias
}
return ""
}
func (x *DWN) GetCid() string {
if x != nil {
return x.Cid
}
return ""
}
func (x *DWN) GetResolver() string {
if x != nil {
return x.Resolver
}
return ""
}
var File_vault_v1_state_proto protoreflect.FileDescriptor
var file_vault_v1_state_proto_rawDesc = []byte{
0x0a, 0x14, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x65,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x76, 0x31,
0x1a, 0x17, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2f, 0x6f, 0x72, 0x6d, 0x2f, 0x76, 0x31, 0x2f,
0x6f, 0x72, 0x6d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x83, 0x01, 0x0a, 0x03, 0x44, 0x57,
0x4e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x69,
0x64, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x63, 0x69, 0x64, 0x18, 0x03,
0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x63, 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73,
0x6f, 0x6c, 0x76, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x73,
0x6f, 0x6c, 0x76, 0x65, 0x72, 0x3a, 0x28, 0xf2, 0x9e, 0xd3, 0x8e, 0x03, 0x22, 0x0a, 0x06, 0x0a,
0x02, 0x69, 0x64, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x10, 0x01,
0x18, 0x01, 0x12, 0x09, 0x0a, 0x03, 0x63, 0x69, 0x64, 0x10, 0x02, 0x18, 0x01, 0x18, 0x01, 0x42,
0x88, 0x01, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x2e, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x76, 0x31,
0x42, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2b,
0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6f, 0x6e, 0x73, 0x6f, 0x6e,
0x72, 0x2f, 0x73, 0x6f, 0x6e, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74,
0x2f, 0x76, 0x31, 0x3b, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x56, 0x58,
0x58, 0xaa, 0x02, 0x08, 0x56, 0x61, 0x75, 0x6c, 0x74, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x08, 0x56,
0x61, 0x75, 0x6c, 0x74, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x14, 0x56, 0x61, 0x75, 0x6c, 0x74, 0x5c,
0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02,
0x09, 0x56, 0x61, 0x75, 0x6c, 0x74, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x33,
}
var (
file_vault_v1_state_proto_rawDescOnce sync.Once
file_vault_v1_state_proto_rawDescData = file_vault_v1_state_proto_rawDesc
)
func file_vault_v1_state_proto_rawDescGZIP() []byte {
file_vault_v1_state_proto_rawDescOnce.Do(func() {
file_vault_v1_state_proto_rawDescData = protoimpl.X.CompressGZIP(file_vault_v1_state_proto_rawDescData)
})
return file_vault_v1_state_proto_rawDescData
}
var file_vault_v1_state_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_vault_v1_state_proto_goTypes = []interface{}{
(*DWN)(nil), // 0: vault.v1.DWN
}
var file_vault_v1_state_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for method output_type
0, // [0:0] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_vault_v1_state_proto_init() }
func file_vault_v1_state_proto_init() {
if File_vault_v1_state_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_vault_v1_state_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DWN); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_vault_v1_state_proto_rawDesc,
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_vault_v1_state_proto_goTypes,
DependencyIndexes: file_vault_v1_state_proto_depIdxs,
MessageInfos: file_vault_v1_state_proto_msgTypes,
}.Build()
File_vault_v1_state_proto = out.File
file_vault_v1_state_proto_rawDesc = nil
file_vault_v1_state_proto_goTypes = nil
file_vault_v1_state_proto_depIdxs = nil
}

File diff suppressed because it is too large.

@ -134,6 +134,15 @@ import (
ibcexported "github.com/cosmos/ibc-go/v8/modules/core/exported"
ibckeeper "github.com/cosmos/ibc-go/v8/modules/core/keeper"
ibctm "github.com/cosmos/ibc-go/v8/modules/light-clients/07-tendermint"
did "github.com/onsonr/sonr/x/did"
didkeeper "github.com/onsonr/sonr/x/did/keeper"
didtypes "github.com/onsonr/sonr/x/did/types"
dwn "github.com/onsonr/sonr/x/dwn"
dwnkeeper "github.com/onsonr/sonr/x/dwn/keeper"
dwntypes "github.com/onsonr/sonr/x/dwn/types"
svc "github.com/onsonr/sonr/x/svc"
svckeeper "github.com/onsonr/sonr/x/svc/keeper"
svctypes "github.com/onsonr/sonr/x/svc/types"
"github.com/spf13/cast"
globalfee "github.com/strangelove-ventures/globalfee/x/globalfee"
globalfeekeeper "github.com/strangelove-ventures/globalfee/x/globalfee/keeper"
@ -144,16 +153,6 @@ import (
tokenfactory "github.com/strangelove-ventures/tokenfactory/x/tokenfactory"
tokenfactorykeeper "github.com/strangelove-ventures/tokenfactory/x/tokenfactory/keeper"
tokenfactorytypes "github.com/strangelove-ventures/tokenfactory/x/tokenfactory/types"
did "github.com/onsonr/sonr/x/did"
didkeeper "github.com/onsonr/sonr/x/did/keeper"
didtypes "github.com/onsonr/sonr/x/did/types"
service "github.com/onsonr/sonr/x/service"
servicekeeper "github.com/onsonr/sonr/x/service/keeper"
servicetypes "github.com/onsonr/sonr/x/service/types"
vault "github.com/onsonr/sonr/x/vault"
vaultkeeper "github.com/onsonr/sonr/x/vault/keeper"
vaulttypes "github.com/onsonr/sonr/x/vault/types"
)
const appName = "sonr"
@ -232,8 +231,8 @@ type SonrApp struct {
CrisisKeeper *crisiskeeper.Keeper
UpgradeKeeper *upgradekeeper.Keeper
legacyAmino *codec.LegacyAmino
VaultKeeper vaultkeeper.Keeper
ServiceKeeper servicekeeper.Keeper
DwnKeeper dwnkeeper.Keeper
SvcKeeper svckeeper.Keeper
sm *module.SimulationManager
BasicModuleManager module.BasicManager
ModuleManager *module.Manager
@ -366,8 +365,8 @@ func NewChainApp(
globalfeetypes.StoreKey,
packetforwardtypes.StoreKey,
didtypes.StoreKey,
vaulttypes.StoreKey,
servicetypes.StoreKey,
dwntypes.StoreKey,
svctypes.StoreKey,
)
tkeys := storetypes.NewTransientStoreKeys(paramstypes.TStoreKey)
@ -627,28 +626,43 @@ func NewChainApp(
app.StakingKeeper,
)
// Create the vault Keeper
app.VaultKeeper = vaultkeeper.NewKeeper(
// Create the svc Keeper
app.SvcKeeper = svckeeper.NewKeeper(
appCodec,
sdkruntime.NewKVStoreService(keys[vaulttypes.StoreKey]),
sdkruntime.NewKVStoreService(keys[svctypes.StoreKey]),
logger,
authtypes.NewModuleAddress(govtypes.ModuleName).String(),
app.AccountKeeper,
app.DidKeeper,
)
// Create the service Keeper
app.ServiceKeeper = servicekeeper.NewKeeper(
// Create the dwn Keeper
app.DwnKeeper = dwnkeeper.NewKeeper(
appCodec,
sdkruntime.NewKVStoreService(keys[servicetypes.StoreKey]),
sdkruntime.NewKVStoreService(keys[dwntypes.StoreKey]),
logger,
authtypes.NewModuleAddress(govtypes.ModuleName).String(),
app.DidKeeper,
app.GroupKeeper,
app.NFTKeeper,
app.VaultKeeper,
)
// // Create the vault Keeper
// app.VaultKeeper = vaultkeeper.NewKeeper(
// appCodec,
// sdkruntime.NewKVStoreService(keys[vaulttypes.StoreKey]),
// logger,
// authtypes.NewModuleAddress(govtypes.ModuleName).String(),
// app.AccountKeeper,
// app.DidKeeper,
// )
//
// // Create the service Keeper
// app.ServiceKeeper = servicekeeper.NewKeeper(
// appCodec,
// sdkruntime.NewKVStoreService(keys[servicetypes.StoreKey]),
// logger,
// authtypes.NewModuleAddress(govtypes.ModuleName).String(),
// app.DidKeeper,
// app.GroupKeeper,
// app.NFTKeeper,
// app.VaultKeeper,
// )
//
// Create the globalfee keeper
app.GlobalFeeKeeper = globalfeekeeper.NewKeeper(
appCodec,
@ -906,10 +920,8 @@ func NewChainApp(
),
did.NewAppModule(appCodec, app.DidKeeper, app.NFTKeeper),
vault.NewAppModule(appCodec, app.VaultKeeper, app.DidKeeper),
service.NewAppModule(appCodec, app.ServiceKeeper, app.DidKeeper),
dwn.NewAppModule(appCodec, app.DwnKeeper),
svc.NewAppModule(appCodec, app.SvcKeeper),
)
// BasicModuleManager defines the module BasicManager is in charge of setting up basic,
@ -958,8 +970,8 @@ func NewChainApp(
tokenfactorytypes.ModuleName,
packetforwardtypes.ModuleName,
didtypes.ModuleName,
vaulttypes.ModuleName,
servicetypes.ModuleName,
dwntypes.ModuleName,
svctypes.ModuleName,
)
app.ModuleManager.SetOrderEndBlockers(
@ -979,8 +991,8 @@ func NewChainApp(
tokenfactorytypes.ModuleName,
packetforwardtypes.ModuleName,
didtypes.ModuleName,
vaulttypes.ModuleName,
servicetypes.ModuleName,
dwntypes.ModuleName,
svctypes.ModuleName,
)
// NOTE: The genutils module must occur after staking so that pools are
@ -1009,8 +1021,8 @@ func NewChainApp(
globalfeetypes.ModuleName,
packetforwardtypes.ModuleName,
didtypes.ModuleName,
vaulttypes.ModuleName,
servicetypes.ModuleName,
dwntypes.ModuleName,
svctypes.ModuleName,
}
app.ModuleManager.SetOrderInitGenesis(genesisModuleOrder...)
app.ModuleManager.SetOrderExportGenesis(genesisModuleOrder...)
@ -1469,8 +1481,8 @@ func initParamsKeeper(
paramsKeeper.Subspace(packetforwardtypes.ModuleName).
WithKeyTable(packetforwardtypes.ParamKeyTable())
paramsKeeper.Subspace(didtypes.ModuleName)
paramsKeeper.Subspace(vaulttypes.ModuleName)
paramsKeeper.Subspace(servicetypes.ModuleName)
paramsKeeper.Subspace(dwntypes.ModuleName)
paramsKeeper.Subspace(svctypes.ModuleName)
return paramsKeeper
}


@ -1,165 +0,0 @@
package dexmodel
import (
"fmt"
"time"
"github.com/charmbracelet/bubbles/table"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/lipgloss"
"github.com/spf13/cobra"
)
var (
subtle = lipgloss.AdaptiveColor{Light: "#D9DCCF", Dark: "#383838"}
highlight = lipgloss.AdaptiveColor{Light: "#874BFD", Dark: "#7D56F4"}
special = lipgloss.AdaptiveColor{Light: "#43BF6D", Dark: "#73F59F"}
titleStyle = lipgloss.NewStyle().
MarginLeft(1).
MarginRight(5).
Padding(0, 1).
Italic(true).
Foreground(lipgloss.Color("#FFF7DB")).
SetString("Cosmos Block Explorer")
infoStyle = lipgloss.NewStyle().
BorderStyle(lipgloss.NormalBorder()).
BorderTop(true).
BorderForeground(subtle)
)
type model struct {
blocks []string
transactionTable table.Model
stats map[string]string
width int
height int
}
func initialModel() model {
columns := []table.Column{
{Title: "Hash", Width: 10},
{Title: "Type", Width: 15},
{Title: "Height", Width: 10},
{Title: "Time", Width: 20},
}
rows := []table.Row{
{"abc123", "Transfer", "1000", time.Now().Format(time.RFC3339)},
{"def456", "Delegate", "999", time.Now().Add(-1 * time.Minute).Format(time.RFC3339)},
{"ghi789", "Vote", "998", time.Now().Add(-2 * time.Minute).Format(time.RFC3339)},
}
t := table.New(
table.WithColumns(columns),
table.WithRows(rows),
table.WithFocused(true),
table.WithHeight(7),
)
s := table.DefaultStyles()
s.Header = s.Header.
BorderStyle(lipgloss.NormalBorder()).
BorderForeground(lipgloss.Color("240")).
BorderBottom(true).
Bold(false)
s.Selected = s.Selected.
Foreground(lipgloss.Color("229")).
Background(lipgloss.Color("57")).
Bold(false)
t.SetStyles(s)
return model{
blocks: []string{"Block 1", "Block 2", "Block 3"},
transactionTable: t,
stats: map[string]string{
"Latest Block": "1000",
"Validators": "100",
"Bonded Tokens": "1,000,000",
},
}
}
func (m model) Init() tea.Cmd {
return tick
}
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
var cmd tea.Cmd
switch msg := msg.(type) {
case tea.KeyMsg:
switch msg.String() {
case "q", "ctrl+c":
return m, tea.Quit
case "enter":
return m, tea.Batch(
tea.Printf("Selected transaction: %s", m.transactionTable.SelectedRow()[0]),
)
}
case tea.WindowSizeMsg:
m.height = msg.Height
m.width = msg.Width
case tickMsg:
// Update data here
m.blocks = append([]string{"New Block"}, m.blocks...)
if len(m.blocks) > 5 {
m.blocks = m.blocks[:5]
}
// Add a new transaction to the table
newRow := table.Row{
fmt.Sprintf("tx%d", time.Now().Unix()),
"NewTxType",
fmt.Sprintf("%d", 1000+len(m.transactionTable.Rows())),
time.Now().Format(time.RFC3339),
}
m.transactionTable.SetRows(append([]table.Row{newRow}, m.transactionTable.Rows()...))
if len(m.transactionTable.Rows()) > 10 {
m.transactionTable.SetRows(m.transactionTable.Rows()[:10])
}
return m, tick
}
m.transactionTable, cmd = m.transactionTable.Update(msg)
return m, cmd
}
func (m model) View() string {
s := titleStyle.Render("Cosmos Block Explorer")
s += "\n\n"
// Blocks
s += lipgloss.NewStyle().Bold(true).Render("Recent Blocks") + "\n"
for _, block := range m.blocks {
s += "• " + block + "\n"
}
s += "\n"
// Transactions
s += lipgloss.NewStyle().Bold(true).Render("Recent Transactions") + "\n"
s += m.transactionTable.View() + "\n\n"
// Stats
s += lipgloss.NewStyle().Bold(true).Render("Network Statistics") + "\n"
for key, value := range m.stats {
s += fmt.Sprintf("%s: %s\n", key, value)
}
return s
}
type tickMsg time.Time
func tick() tea.Msg {
time.Sleep(time.Second)
return tickMsg{}
}
func RunExplorerTUI(cmd *cobra.Command, args []string) error {
p := tea.NewProgram(initialModel(), tea.WithAltScreen())
if _, err := p.Run(); err != nil {
return fmt.Errorf("error running explorer: %v", err)
}
return nil
}


@ -1,44 +0,0 @@
package cli
import (
"fmt"
"github.com/cosmos/cosmos-sdk/codec"
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
"github.com/onsonr/sonr/app/cli/dexmodel"
"github.com/onsonr/sonr/app/cli/txmodel"
"github.com/spf13/cobra"
)
func NewBuildTxnTUICmd() *cobra.Command {
return &cobra.Command{
Use: "dash",
Short: "TUI for managing the local Sonr validator node",
RunE: func(cmd *cobra.Command, args []string) error {
txBody, err := txmodel.RunBuildTxnTUI()
if err != nil {
return err
}
interfaceRegistry := codectypes.NewInterfaceRegistry()
marshaler := codec.NewProtoCodec(interfaceRegistry)
jsonBytes, err := marshaler.MarshalJSON(txBody)
if err != nil {
return fmt.Errorf("failed to marshal tx body: %w", err)
}
fmt.Println("Generated Protobuf Message (JSON format):")
fmt.Println(string(jsonBytes))
return nil
},
}
}
func NewExplorerTUICmd() *cobra.Command {
return &cobra.Command{
Use: "cosmos-explorer",
Short: "A terminal-based Cosmos blockchain explorer",
RunE: dexmodel.RunExplorerTUI,
}
}


@ -1,322 +0,0 @@
package txmodel
import (
"fmt"
"strings"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/huh"
"github.com/charmbracelet/lipgloss"
"github.com/cosmos/cosmos-sdk/codec"
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/types/tx"
banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
)
const maxWidth = 100
var (
red = lipgloss.AdaptiveColor{Light: "#FE5F86", Dark: "#FE5F86"}
indigo = lipgloss.AdaptiveColor{Light: "#5A56E0", Dark: "#7571F9"}
green = lipgloss.AdaptiveColor{Light: "#02BA84", Dark: "#02BF87"}
)
type Styles struct {
Base,
HeaderText,
Status,
StatusHeader,
Highlight,
ErrorHeaderText,
Help lipgloss.Style
}
func NewStyles(lg *lipgloss.Renderer) *Styles {
s := Styles{}
s.Base = lg.NewStyle().
Padding(1, 2, 0, 1)
s.HeaderText = lg.NewStyle().
Foreground(indigo).
Bold(true).
Padding(0, 1, 0, 1)
s.Status = lg.NewStyle().
Border(lipgloss.RoundedBorder()).
BorderForeground(indigo).
PaddingLeft(1).
MarginTop(1)
s.StatusHeader = lg.NewStyle().
Foreground(green).
Bold(true)
s.Highlight = lg.NewStyle().
Foreground(lipgloss.Color("212"))
s.ErrorHeaderText = s.HeaderText.
Foreground(red)
s.Help = lg.NewStyle().
Foreground(lipgloss.Color("240"))
return &s
}
type state int
const (
statusNormal state = iota
stateDone
)
type Model struct {
state state
lg *lipgloss.Renderer
styles *Styles
form *huh.Form
width int
message *tx.TxBody
}
func NewModel() Model {
m := Model{width: maxWidth}
m.lg = lipgloss.DefaultRenderer()
m.styles = NewStyles(m.lg)
m.form = huh.NewForm(
huh.NewGroup(
huh.NewInput().
Key("from").
Title("From Address").
Placeholder("cosmos1...").
Validate(func(s string) error {
if !strings.HasPrefix(s, "cosmos1") {
return fmt.Errorf("invalid address format")
}
return nil
}),
huh.NewInput().
Key("to").
Title("To Address").
Placeholder("cosmos1...").
Validate(func(s string) error {
if !strings.HasPrefix(s, "cosmos1") {
return fmt.Errorf("invalid address format")
}
return nil
}),
huh.NewInput().
Key("amount").
Title("Amount").
Placeholder("100").
Validate(func(s string) error {
if _, err := sdk.ParseCoinNormalized(s + "atom"); err != nil {
return fmt.Errorf("invalid coin amount")
}
return nil
}),
huh.NewSelect[string]().
Key("denom").
Title("Denom").
Options(huh.NewOptions("atom", "osmo", "usnr", "snr")...),
huh.NewInput().
Key("memo").
Title("Memo").
Placeholder("Optional"),
huh.NewConfirm().
Key("done").
Title("Ready to convert?").
Validate(func(v bool) error {
if !v {
return fmt.Errorf("Please confirm when you're ready to convert")
}
return nil
}).
Affirmative("Yes, convert!").
Negative("Not yet"),
),
).
WithWidth(60).
WithShowHelp(false).
WithShowErrors(false)
return m
}
func (m Model) Init() tea.Cmd {
return m.form.Init()
}
func min(x, y int) int {
if x > y {
return y
}
return x
}
func (m Model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
switch msg := msg.(type) {
case tea.WindowSizeMsg:
m.width = min(msg.Width, maxWidth) - m.styles.Base.GetHorizontalFrameSize()
case tea.KeyMsg:
switch msg.String() {
case "esc", "ctrl+c", "q":
return m, tea.Quit
}
}
var cmds []tea.Cmd
form, cmd := m.form.Update(msg)
if f, ok := form.(*huh.Form); ok {
m.form = f
cmds = append(cmds, cmd)
}
if m.form.State == huh.StateCompleted {
m.buildMessage()
cmds = append(cmds, tea.Quit)
}
return m, tea.Batch(cmds...)
}
func (m Model) View() string {
s := m.styles
switch m.form.State {
case huh.StateCompleted:
pklCode := m.generatePkl()
messageView := m.getMessageView()
var b strings.Builder
fmt.Fprintf(&b, "Final Tx:\n\n%s\n\n%s", pklCode, messageView)
return s.Status.Margin(0, 1).Padding(1, 2).Width(80).Render(b.String()) + "\n\n"
default:
var schemaType string
if m.form.GetString("schemaType") != "" {
schemaType = "Schema Type: " + m.form.GetString("schemaType")
}
v := strings.TrimSuffix(m.form.View(), "\n\n")
form := m.lg.NewStyle().Margin(1, 0).Render(v)
var status string
{
preview := "(Preview will appear here)"
if m.form.GetString("schema") != "" {
preview = m.generatePkl()
}
const statusWidth = 40
statusMarginLeft := m.width - statusWidth - lipgloss.Width(form) - s.Status.GetMarginRight()
status = s.Status.
Height(lipgloss.Height(form)).
Width(statusWidth).
MarginLeft(statusMarginLeft).
Render(s.StatusHeader.Render("Pkl Preview") + "\n" +
schemaType + "\n\n" +
preview)
}
errors := m.form.Errors()
header := m.appBoundaryView("Sonr TX Builder")
if len(errors) > 0 {
header = m.appErrorBoundaryView(m.errorView())
}
body := lipgloss.JoinHorizontal(lipgloss.Top, form, status)
footer := m.appBoundaryView(m.form.Help().ShortHelpView(m.form.KeyBinds()))
if len(errors) > 0 {
footer = m.appErrorBoundaryView("")
}
return s.Base.Render(header + "\n" + body + "\n\n" + footer)
}
}
func (m Model) errorView() string {
var s string
for _, err := range m.form.Errors() {
s += err.Error()
}
return s
}
func (m Model) appBoundaryView(text string) string {
return lipgloss.PlaceHorizontal(
m.width,
lipgloss.Left,
m.styles.HeaderText.Render(text),
lipgloss.WithWhitespaceChars("="),
lipgloss.WithWhitespaceForeground(indigo),
)
}
func (m Model) appErrorBoundaryView(text string) string {
return lipgloss.PlaceHorizontal(
m.width,
lipgloss.Left,
m.styles.ErrorHeaderText.Render(text),
lipgloss.WithWhitespaceChars("="),
lipgloss.WithWhitespaceForeground(red),
)
}
func (m Model) generatePkl() string {
schemaType := m.form.GetString("schemaType")
schema := m.form.GetString("schema")
// This is a placeholder for the actual conversion logic
// In a real implementation, you would parse the schema and generate Pkl code
return fmt.Sprintf("// Converted from %s\n\nclass ConvertedSchema {\n // TODO: Implement conversion from %s\n // Original schema:\n /*\n%s\n */\n}", schemaType, schemaType, schema)
}
func (m *Model) buildMessage() {
from := m.form.GetString("from")
to := m.form.GetString("to")
amount := m.form.GetString("amount")
denom := m.form.GetString("denom")
memo := m.form.GetString("memo")
coin, _ := sdk.ParseCoinNormalized(fmt.Sprintf("%s%s", amount, denom))
sendMsg := &banktypes.MsgSend{
FromAddress: from,
ToAddress: to,
Amount: sdk.NewCoins(coin),
}
anyMsg, _ := codectypes.NewAnyWithValue(sendMsg)
m.message = &tx.TxBody{
Messages: []*codectypes.Any{anyMsg},
Memo: memo,
}
}
func (m Model) getMessageView() string {
if m.message == nil {
return "Current Message: None"
}
interfaceRegistry := codectypes.NewInterfaceRegistry()
marshaler := codec.NewProtoCodec(interfaceRegistry)
jsonBytes, _ := marshaler.MarshalJSON(m.message)
return fmt.Sprintf("Current Message:\n%s", string(jsonBytes))
}
func RunBuildTxnTUI() (*tx.TxBody, error) {
m := NewModel()
p := tea.NewProgram(m)
finalModel, err := p.Run()
if err != nil {
return nil, fmt.Errorf("failed to run program: %w", err)
}
finalM, ok := finalModel.(Model)
if !ok || finalM.message == nil {
return nil, fmt.Errorf("form not completed")
}
return finalM.message, nil
}

buf.work.yaml (new file, 3 lines)

@ -0,0 +1,3 @@
version: v1
directories:
- proto

Binary file not shown.

cmd/hway/config.pkl (new file, 1 line)

@ -0,0 +1 @@
amends "https://pkl.sh/sonr.hway/0.0.3/Env.pkl"


@ -1,18 +1,58 @@
//go:build js && wasm
package main
import (
_ "embed"
"fmt"
"log"
"net/http"
"os"
"github.com/labstack/echo/v4"
"github.com/onsonr/sonr/internal/ctx"
"github.com/onsonr/sonr/pkg/workers/routes"
"github.com/syumai/workers"
"github.com/labstack/echo/v4/middleware"
"github.com/onsonr/sonr/crypto/ucan"
"github.com/onsonr/sonr/pkg/common/ipfs"
"github.com/onsonr/sonr/pkg/common/producer"
"github.com/onsonr/sonr/pkg/gateway"
"github.com/onsonr/sonr/pkg/gateway/config"
)
func main() {
s := echo.New()
s.Use(ctx.HighwaySessionMiddleware)
routes.RegisterGatewayViews(s)
routes.RegisterGatewayAPI(s)
workers.Serve(s)
//go:embed config.pkl
var configBz []byte
func loadConfig() (config.Env, error) {
return config.LoadFromBytes(configBz)
}
// setupServer sets up the server
func setupServer(env config.Env) (*echo.Echo, error) {
ipc, err := ipfs.NewClient()
if err != nil {
return nil, err
}
e := echo.New()
e.IPExtractor = echo.ExtractIPDirect()
e.Use(middleware.Logger())
e.Use(middleware.Recover())
e.Use(producer.Middleware(ipc, ucan.ServicePermissions))
gateway.RegisterRoutes(e, env)
return e, nil
}
// main is the entry point for the application
func main() {
env, err := loadConfig()
if err != nil {
panic(err)
}
e, err := setupServer(env)
if err != nil {
panic(err)
}
if err := e.Start(fmt.Sprintf(":%d", env.GetServePort())); err != http.ErrServerClosed {
log.Println(err)
os.Exit(1)
}
}


@ -1,8 +0,0 @@
name = "sonr-id"
main = "./build/worker.mjs"
compatibility_date = "2024-10-07"
routes = [{ pattern = "sonr.id", custom_domain = true }]
[build]
command = "task hway:build"

cmd/motr/internal/fetch.go (new file, 211 lines)

@ -0,0 +1,211 @@
//go:build js && wasm
// +build js,wasm
package internal
import (
"bytes"
"fmt"
"io"
"net/http"
"net/http/httptest"
"strings"
"sync"
"syscall/js"
)
var (
// Global buffer pool to reduce allocations
bufferPool = sync.Pool{
New: func() interface{} {
return new(bytes.Buffer)
},
}
// Cached JS globals
jsGlobal = js.Global()
jsUint8Array = jsGlobal.Get("Uint8Array")
jsResponse = jsGlobal.Get("Response")
jsPromise = jsGlobal.Get("Promise")
jsWasmHTTP = jsGlobal.Get("wasmhttp")
)
// serveFetch serves HTTP requests with optimized handler management
func ServeFetch(handler http.Handler) func() {
h := handler
if h == nil {
h = http.DefaultServeMux
}
// Optimize prefix handling
prefix := strings.TrimRight(jsWasmHTTP.Get("path").String(), "/")
if prefix != "" {
mux := http.NewServeMux()
mux.Handle(prefix+"/", http.StripPrefix(prefix, h))
h = mux
}
// Create request handler function
cb := js.FuncOf(func(_ js.Value, args []js.Value) interface{} {
promise, resolve, reject := newPromiseOptimized()
go handleRequest(h, args[1], resolve, reject)
return promise
})
jsWasmHTTP.Call("setHandler", cb)
return cb.Release
}
// handleRequest processes the request with panic recovery
func handleRequest(h http.Handler, jsReq js.Value, resolve, reject func(interface{})) {
defer func() {
if r := recover(); r != nil {
var errMsg string
if err, ok := r.(error); ok {
errMsg = fmt.Sprintf("wasmhttp: panic: %+v", err)
} else {
errMsg = fmt.Sprintf("wasmhttp: panic: %v", r)
}
reject(errMsg)
}
}()
recorder := newResponseRecorder()
h.ServeHTTP(recorder, buildRequest(jsReq))
resolve(recorder.jsResponse())
}
// buildRequest creates an http.Request from JS Request
func buildRequest(jsReq js.Value) *http.Request {
// Get request body
arrayBuffer, err := awaitPromiseOptimized(jsReq.Call("arrayBuffer"))
if err != nil {
panic(err)
}
// Create body buffer
jsBody := jsUint8Array.New(arrayBuffer)
bodyLen := jsBody.Get("length").Int()
body := make([]byte, bodyLen)
js.CopyBytesToGo(body, jsBody)
// Create request
req := httptest.NewRequest(
jsReq.Get("method").String(),
jsReq.Get("url").String(),
bytes.NewReader(body),
)
// Set headers efficiently
headers := jsReq.Get("headers")
headersIt := headers.Call("entries")
for {
entry := headersIt.Call("next")
if entry.Get("done").Bool() {
break
}
pair := entry.Get("value")
req.Header.Set(pair.Index(0).String(), pair.Index(1).String())
}
return req
}
// ResponseRecorder with optimized buffer handling
type ResponseRecorder struct {
*httptest.ResponseRecorder
buffer *bytes.Buffer
}
func newResponseRecorder() *ResponseRecorder {
return &ResponseRecorder{
ResponseRecorder: httptest.NewRecorder(),
buffer: bufferPool.Get().(*bytes.Buffer),
}
}
// jsResponse creates a JS Response with optimized memory usage
func (rr *ResponseRecorder) jsResponse() js.Value {
defer func() {
rr.buffer.Reset()
bufferPool.Put(rr.buffer)
}()
res := rr.Result()
defer res.Body.Close()
// Prepare response body
body := js.Undefined()
if res.ContentLength != 0 {
if _, err := io.Copy(rr.buffer, res.Body); err != nil {
panic(err)
}
bodyBytes := rr.buffer.Bytes()
body = jsUint8Array.New(len(bodyBytes))
js.CopyBytesToJS(body, bodyBytes)
}
// Prepare response init object
init := make(map[string]interface{}, 3)
if res.StatusCode != 0 {
init["status"] = res.StatusCode
}
if len(res.Header) > 0 {
headers := make(map[string]interface{}, len(res.Header))
for k, v := range res.Header {
if len(v) > 0 {
headers[k] = v[0]
}
}
init["headers"] = headers
}
return jsResponse.New(body, init)
}
// newPromiseOptimized creates a new JavaScript Promise with optimized callback handling
func newPromiseOptimized() (js.Value, func(interface{}), func(interface{})) {
var (
resolve func(interface{})
reject func(interface{})
promiseFunc = js.FuncOf(func(_ js.Value, args []js.Value) interface{} {
resolve = func(v interface{}) { args[0].Invoke(v) }
reject = func(v interface{}) { args[1].Invoke(v) }
return js.Undefined()
})
)
defer promiseFunc.Release()
return jsPromise.New(promiseFunc), resolve, reject
}
// awaitPromiseOptimized waits for Promise resolution with optimized channel handling
func awaitPromiseOptimized(promise js.Value) (js.Value, error) {
done := make(chan struct{})
var (
result js.Value
err error
)
thenFunc := js.FuncOf(func(_ js.Value, args []js.Value) interface{} {
result = args[0]
close(done)
return nil
})
defer thenFunc.Release()
catchFunc := js.FuncOf(func(_ js.Value, args []js.Value) interface{} {
err = js.Error{Value: args[0]}
close(done)
return nil
})
defer catchFunc.Release()
promise.Call("then", thenFunc).Call("catch", catchFunc)
<-done
return result, err
}

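ServeFetch accepts any http.Handler; cmd/motr/main.go below wires it to an Echo instance. As a minimal sketch with a plain ServeMux (same build tags; the select{} keep-alive is an assumption about the host runtime, not taken from this changeset):

```go
//go:build js && wasm

package main

import (
	"net/http"

	"github.com/onsonr/sonr/cmd/motr/internal"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/ping", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("pong")) // served through the JS fetch bridge
	})
	release := internal.ServeFetch(mux) // registers the handler with the wasmhttp shim
	defer release()
	select {} // assumption: block so the module stays alive to answer fetches
}
```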
cmd/motr/internal/wasm.go (new file, 34 lines)

@ -0,0 +1,34 @@
//go:build js && wasm
// +build js,wasm
package internal
import (
"encoding/base64"
"encoding/json"
"github.com/labstack/echo/v4"
)
func WasmContextMiddleware(next echo.HandlerFunc) echo.HandlerFunc {
return func(c echo.Context) error {
// Extract WASM context from headers
if wasmCtx := c.Request().Header.Get("X-Wasm-Context"); wasmCtx != "" {
if ctx, err := DecodeWasmContext(wasmCtx); err == nil {
c.Set("wasm_context", ctx)
}
}
return next(c)
}
}
// DecodeWasmContext decodes the WASM context from a base64 encoded string
func DecodeWasmContext(ctx string) (map[string]any, error) {
decoded, err := base64.StdEncoding.DecodeString(ctx)
if err != nil {
return nil, err
}
var ctxData map[string]any
err = json.Unmarshal(decoded, &ctxData)
return ctxData, err
}

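For the other direction, a hypothetical EncodeWasmContext helper (not in this changeset) shows how a caller would build the X-Wasm-Context header that the middleware above consumes: JSON-encode the context map, then base64 it.

```go
package internal

import (
	"encoding/base64"
	"encoding/json"
)

// EncodeWasmContext is a hypothetical inverse of DecodeWasmContext:
// it JSON-encodes the context map and base64-encodes the bytes for
// use as the X-Wasm-Context header value.
func EncodeWasmContext(ctx map[string]any) (string, error) {
	raw, err := json.Marshal(ctx)
	if err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(raw), nil
}
```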
cmd/motr/main.go (new file, 55 lines)

@ -0,0 +1,55 @@
//go:build js && wasm
// +build js,wasm
package main
import (
"encoding/json"
"syscall/js"
"github.com/labstack/echo/v4"
"github.com/onsonr/sonr/cmd/motr/internal"
"github.com/onsonr/sonr/pkg/common/controller"
"github.com/onsonr/sonr/pkg/vault"
"github.com/onsonr/sonr/pkg/vault/types"
)
var (
env *types.Environment
config *types.Config
err error
)
func broadcastTx(this js.Value, args []js.Value) interface{} {
return nil
}
func simulateTx(this js.Value, args []js.Value) interface{} {
return nil
}
func processConfig(this js.Value, args []js.Value) interface{} {
if len(args) < 1 {
return nil
}
configString := args[0].String()
if err := json.Unmarshal([]byte(configString), &config); err != nil {
println("Error parsing config:", err.Error())
return nil
}
return nil
}
func main() {
// Load dwn config
js.Global().Set("broadcastTx", js.FuncOf(broadcastTx))
js.Global().Set("simulateTx", js.FuncOf(simulateTx))
js.Global().Set("processConfig", js.FuncOf(processConfig))
e := echo.New()
e.Use(internal.WasmContextMiddleware)
e.Use(controller.Middleware(nil))
vault.RegisterRoutes(e, config)
internal.ServeFetch(e)
}

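Once the module is instantiated, the three functions registered on js.Global become plain JavaScript globals; sketched here as Go comments, since the calls happen on the host page.

```go
// Host-page usage (assumed, based on the js.Global().Set calls above):
//
//   processConfig(JSON.stringify(config)); // unmarshals into the vault types.Config
//   broadcastTx(signedTx);                 // currently a stub returning null
//   simulateTx(unsignedTx);                // currently a stub returning null
```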

@ -34,6 +34,7 @@ import (
genutilcli "github.com/cosmos/cosmos-sdk/x/genutil/client/cli"
)
// TODO: Load this from PKL
// initCometBFTConfig helps to override default CometBFT Config values.
// return cmtcfg.DefaultConfig if no custom configuration is required for the application.
func initCometBFTConfig() *cmtcfg.Config {
@ -46,12 +47,13 @@ func initCometBFTConfig() *cmtcfg.Config {
return cfg
}
// TODO: Load this from PKL
// initAppConfig helps to override default appConfig template and configs.
// return "", nil if no custom configuration is required for the application.
func initAppConfig() (string, interface{}) {
// The following code snippet is just for reference.
type CustomAppConfig struct {
type SonrAppConfig struct {
serverconfig.Config
}
@ -73,7 +75,7 @@ func initAppConfig() (string, interface{}) {
srvCfg.MinGasPrices = "0stake"
// srvCfg.BaseConfig.IAVLDisableFastNode = true // disable fastnode by default
customAppConfig := CustomAppConfig{
customAppConfig := SonrAppConfig{
Config: *srvCfg,
}


@ -8,13 +8,10 @@ import (
_ "github.com/joho/godotenv/autoload"
"github.com/onsonr/sonr/app"
"github.com/onsonr/sonr/app/cli"
)
func main() {
rootCmd := NewRootCmd()
rootCmd.AddCommand(cli.NewBuildTxnTUICmd())
rootCmd.AddCommand(cli.NewExplorerTUICmd())
if err := svrcmd.Execute(rootCmd, "", app.DefaultNodeHome); err != nil {
log.NewLogger(rootCmd.OutOrStderr()).Error("failure when running app", "err", err)


@ -1 +0,0 @@
package main

crypto/accumulator/accumulator.go (new executable file, 171 lines)

@ -0,0 +1,171 @@
//
// Copyright Coinbase, Inc. All Rights Reserved.
//
// SPDX-License-Identifier: Apache-2.0
//
// Package accumulator implements the cryptographic accumulator as described in https://eprint.iacr.org/2020/777.pdf
// It also implements the zero knowledge proof of knowledge protocol
// described in section 7 of the paper.
// Note: the paper only describes the non-membership witness case, but we don't
// use non-membership witnesses; we only implement the membership witness case.
package accumulator
import (
"fmt"
"git.sr.ht/~sircmpwn/go-bare"
"github.com/onsonr/sonr/crypto/core/curves"
)
type structMarshal struct {
Curve string `bare:"curve"`
Value []byte `bare:"value"`
}
type Element curves.Scalar
// Coefficient is a point
type Coefficient curves.Point
// Accumulator is a point
type Accumulator struct {
value curves.Point
}
// New creates a new accumulator.
func (acc *Accumulator) New(curve *curves.PairingCurve) (*Accumulator, error) {
// If we need to support non-membership witness, we need to implement Accumulator Initialization
// as described in section 6 of <https://eprint.iacr.org/2020/777.pdf>
// for now we don't need non-membership witness
// i.e., it computes V0 = prod(y + α) * P, y ∈ Y_V0, P is a generator of G1. Since we do not use non-membership witness
// we just set the initial accumulator to a G1 generator.
acc.value = curve.Scalar.Point().Generator()
return acc, nil
}
// WithElements initializes a new accumulator prefilled with entries
// Each member is assumed to be hashed
// V = prod(y + α) * V0, for all y∈ Y_V
func (acc *Accumulator) WithElements(curve *curves.PairingCurve, key *SecretKey, m []Element) (*Accumulator, error) {
_, err := acc.New(curve)
if err != nil {
return nil, err
}
y, err := key.BatchAdditions(m)
if err != nil {
return nil, err
}
acc.value = acc.value.Mul(y)
return acc, nil
}
// AddElements accumulates a set of elements into the accumulator.
func (acc *Accumulator) AddElements(key *SecretKey, m []Element) (*Accumulator, error) {
if acc.value == nil || key.value == nil {
return nil, fmt.Errorf("accumulator and secret key should not be nil")
}
y, err := key.BatchAdditions(m)
if err != nil {
return nil, err
}
acc.value = acc.value.Mul(y)
return acc, nil
}
// Add accumulates a single element into the accumulator
// V' = (y + alpha) * V
func (acc *Accumulator) Add(key *SecretKey, e Element) (*Accumulator, error) {
if acc.value == nil || acc.value.IsIdentity() || key.value == nil || e == nil {
return nil, fmt.Errorf("accumulator, secret key and element should not be nil")
}
y := e.Add(key.value) // y + alpha
acc.value = acc.value.Mul(y)
return acc, nil
}
// Remove removes a single element from accumulator if it exists
// V' = 1/(y+alpha) * V
func (acc *Accumulator) Remove(key *SecretKey, e Element) (*Accumulator, error) {
if acc.value == nil || acc.value.IsIdentity() || key.value == nil || e == nil {
return nil, fmt.Errorf("accumulator, secret key and element should not be nil")
}
y := e.Add(key.value) // y + alpha
y, err := y.Invert() // 1/(y+alpha)
if err != nil {
return nil, err
}
acc.value = acc.value.Mul(y)
return acc, nil
}
// Update performs a batch addition and deletion as described on page 7, section 3 in
// https://eprint.iacr.org/2020/777.pdf
func (acc *Accumulator) Update(key *SecretKey, additions []Element, deletions []Element) (*Accumulator, []Coefficient, error) {
if acc.value == nil || acc.value.IsIdentity() || key.value == nil {
return nil, nil, fmt.Errorf("accumulator and secret key should not be nil")
}
// Compute dA(-alpha) = prod(y + alpha), y in the set of A ⊆ ACC-Y_V
a, err := key.BatchAdditions(additions)
if err != nil {
return nil, nil, err
}
// Compute dD(-alpha) = 1/prod(y + alpha), y in the set of D ⊆ Y_V
d, err := key.BatchDeletions(deletions)
if err != nil {
return nil, nil, err
}
// dA(-alpha)/dD(-alpha)
div := a.Mul(d)
newAcc := acc.value.Mul(div)
// build an array of coefficients
elements, err := key.CreateCoefficients(additions, deletions)
if err != nil {
return nil, nil, err
}
coefficients := make([]Coefficient, len(elements))
for i := 0; i < len(elements); i++ {
coefficients[i] = acc.value.Mul(elements[i])
}
acc.value = newAcc
return acc, coefficients, nil
}
// MarshalBinary converts Accumulator to bytes
func (acc Accumulator) MarshalBinary() ([]byte, error) {
if acc.value == nil {
return nil, fmt.Errorf("accumulator cannot be nil")
}
tv := &structMarshal{
Value: acc.value.ToAffineCompressed(),
Curve: acc.value.CurveName(),
}
return bare.Marshal(tv)
}
// UnmarshalBinary sets Accumulator from bytes
func (acc *Accumulator) UnmarshalBinary(data []byte) error {
tv := new(structMarshal)
err := bare.Unmarshal(data, tv)
if err != nil {
return err
}
curve := curves.GetCurveByName(tv.Curve)
if curve == nil {
return fmt.Errorf("invalid curve")
}
value, err := curve.NewIdentityPoint().FromAffineCompressed(tv.Value)
if err != nil {
return err
}
acc.value = value
return nil
}

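A minimal end-to-end sketch of the API above, mirroring the tests that follow: create a secret key and accumulator on BLS12-381 G1, add a hashed element, then remove it to land back on the generator. The zero seed is for illustration only.

```go
package main

import (
	"fmt"

	"github.com/onsonr/sonr/crypto/accumulator"
	"github.com/onsonr/sonr/crypto/core/curves"
)

func main() {
	curve := curves.BLS12381(&curves.PointBls12381G1{})
	var seed [32]byte // illustration only; use a real secret seed
	key, err := new(accumulator.SecretKey).New(curve, seed[:])
	if err != nil {
		panic(err)
	}
	acc, err := new(accumulator.Accumulator).New(curve)
	if err != nil {
		panic(err)
	}
	e := curve.Scalar.Hash([]byte("value1"))
	if _, err = acc.Add(key, e); err != nil { // V' = (y + alpha) * V
		panic(err)
	}
	if _, err = acc.Remove(key, e); err != nil { // V' = 1/(y + alpha) * V
		panic(err)
	}
	bz, _ := acc.MarshalBinary()
	fmt.Printf("back at the generator: %x\n", bz)
}
```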
crypto/accumulator/accumulator_test.go (new file, 188 lines)

@ -0,0 +1,188 @@
//
// Copyright Coinbase, Inc. All Rights Reserved.
//
// SPDX-License-Identifier: Apache-2.0
//
package accumulator
import (
"encoding/hex"
"fmt"
"testing"
"github.com/stretchr/testify/require"
"github.com/onsonr/sonr/crypto/core/curves"
)
func TestNewAccumulator100(t *testing.T) {
curve := curves.BLS12381(&curves.PointBls12381G1{})
var seed [32]byte
key, err := new(SecretKey).New(curve, seed[:])
require.NoError(t, err)
require.NotNil(t, key)
acc, err := new(Accumulator).New(curve)
require.NoError(t, err)
accBz, err := acc.MarshalBinary()
require.NoError(t, err)
fmt.Println(accBz)
fmt.Println(len(accBz))
fmt.Println(hex.EncodeToString(accBz))
fmt.Println(len(hex.EncodeToString(accBz)))
require.Equal(t, 60, len(accBz), "Marshalled accumulator should be 60 bytes")
require.Equal(t, 120, len(hex.EncodeToString(accBz)), "Hex-encoded accumulator should be 120 characters")
require.NotNil(t, acc)
require.Equal(t, acc.value.ToAffineCompressed(), curve.PointG1.Generator().ToAffineCompressed())
}
func TestNewAccumulator10K(t *testing.T) {
curve := curves.BLS12381(&curves.PointBls12381G1{})
var seed [32]byte
key, err := new(SecretKey).New(curve, seed[:])
require.NoError(t, err)
require.NotNil(t, key)
acc, err := new(Accumulator).New(curve)
require.NoError(t, err)
require.NotNil(t, acc)
require.Equal(t, acc.value.ToAffineCompressed(), curve.PointG1.Generator().ToAffineCompressed())
}
func TestNewAccumulator10M(t *testing.T) {
// Initiating 10M values takes time
if testing.Short() {
t.Skip("skipping test in short mode.")
}
curve := curves.BLS12381(&curves.PointBls12381G1{})
var seed [32]byte
key, err := new(SecretKey).New(curve, seed[:])
require.NoError(t, err)
require.NotNil(t, key)
acc, err := new(Accumulator).New(curve)
require.NoError(t, err)
require.NotNil(t, acc)
require.Equal(t, acc.value.ToAffineCompressed(), curve.PointG1.Generator().ToAffineCompressed())
}
func TestWithElements(t *testing.T) {
curve := curves.BLS12381(&curves.PointBls12381G1{})
var seed [32]byte
key, _ := new(SecretKey).New(curve, seed[:])
element1 := curve.Scalar.Hash([]byte("value1"))
element2 := curve.Scalar.Hash([]byte("value2"))
elements := []Element{element1, element2}
newAcc, err := new(Accumulator).WithElements(curve, key, elements)
require.NoError(t, err)
require.NotNil(t, newAcc)
require.NotEqual(t, newAcc.value.ToAffineCompressed(), curve.PointG1.Identity().ToAffineCompressed())
require.NotEqual(t, newAcc.value.ToAffineCompressed(), curve.PointG1.Generator().ToAffineCompressed())
_, _ = newAcc.Remove(key, element1)
_, _ = newAcc.Remove(key, element2)
require.Equal(t, newAcc.value.ToAffineCompressed(), curve.PointG1.Generator().ToAffineCompressed())
}
func TestAdd(t *testing.T) {
curve := curves.BLS12381(&curves.PointBls12381G1{})
var seed [32]byte
key, err := new(SecretKey).New(curve, seed[:])
require.NoError(t, err)
require.NotNil(t, key)
acc := &Accumulator{curve.PointG1.Generator()}
_, _ = acc.New(curve)
require.NoError(t, err)
require.NotNil(t, acc)
element := curve.Scalar.Hash([]byte("value1"))
require.NoError(t, err)
require.NotNil(t, element)
_, _ = acc.Add(key, element)
require.NotEqual(t, acc.value.ToAffineCompressed(), curve.PointG1.Generator().ToAffineCompressed())
}
func TestRemove(t *testing.T) {
curve := curves.BLS12381(&curves.PointBls12381G1{})
var seed [32]byte
key, err := new(SecretKey).New(curve, seed[:])
require.NoError(t, err)
require.NotNil(t, key)
acc, err := new(Accumulator).New(curve)
require.NoError(t, err)
require.NotNil(t, acc)
require.Equal(t, acc.value.ToAffineCompressed(), curve.PointG1.Generator().ToAffineCompressed())
element := curve.Scalar.Hash([]byte("value1"))
require.NoError(t, err)
require.NotNil(t, element)
// add element
_, _ = acc.Add(key, element)
require.NotEqual(t, acc.value.ToAffineCompressed(), curve.PointG1.Generator().ToAffineCompressed())
// remove element
acc, err = acc.Remove(key, element)
require.NoError(t, err)
require.Equal(t, acc.value.ToAffineCompressed(), curve.PointG1.Generator().ToAffineCompressed())
}
func TestAddElements(t *testing.T) {
curve := curves.BLS12381(&curves.PointBls12381G1{})
var seed [32]byte
key, err := new(SecretKey).New(curve, seed[:])
require.NoError(t, err)
require.NotNil(t, key)
acc := &Accumulator{curve.PointG1.Generator()}
_, _ = acc.New(curve)
require.NoError(t, err)
require.NotNil(t, acc)
require.Equal(t, acc.value.ToAffineCompressed(), curve.PointG1.Generator().ToAffineCompressed())
element1 := curve.Scalar.Hash([]byte("value1"))
element2 := curve.Scalar.Hash([]byte("value2"))
element3 := curve.Scalar.Hash([]byte("value3"))
elements := []Element{element1, element2, element3}
acc, err = acc.AddElements(key, elements)
require.NoError(t, err)
require.NotEqual(t, acc.value.ToAffineCompressed(), curve.PointG1.Generator().ToAffineCompressed())
}
func TestAccumulatorMarshal(t *testing.T) {
curve := curves.BLS12381(&curves.PointBls12381G1{})
point := curve.PointG1.Generator().Mul(curve.Scalar.New(2))
data, err := Accumulator{point}.MarshalBinary()
require.NoError(t, err)
require.NotNil(t, data)
// element cannot be empty
_, err = Accumulator{}.MarshalBinary()
require.Error(t, err)
e := &Accumulator{curve.PointG1.Generator()}
_ = e.UnmarshalBinary(data)
require.True(t, e.value.Equal(point))
}
func TestUpdate(t *testing.T) {
curve := curves.BLS12381(&curves.PointBls12381G1{})
var seed [32]byte
key, err := new(SecretKey).New(curve, seed[:])
require.NoError(t, err)
require.NotNil(t, key)
acc, err := new(Accumulator).New(curve)
require.NoError(t, err)
require.NotNil(t, acc)
require.Equal(t, acc.value.ToAffineCompressed(), curve.PointG1.Generator().ToAffineCompressed())
element1 := curve.Scalar.Hash([]byte("value1"))
element2 := curve.Scalar.Hash([]byte("value2"))
element3 := curve.Scalar.Hash([]byte("value3"))
elements := []Element{element1, element2, element3}
acc, _, err = acc.Update(key, elements, nil)
require.NoError(t, err)
require.NotEqual(t, acc.value.ToAffineCompressed(), curve.PointG1.Generator().ToAffineCompressed())
acc, _, err = acc.Update(key, nil, elements)
require.NoError(t, err)
require.Equal(t, acc.value.ToAffineCompressed(), curve.PointG1.Generator().ToAffineCompressed())
}

crypto/accumulator/key.go (new executable file, 244 lines)

@ -0,0 +1,244 @@
//
// Copyright Coinbase, Inc. All Rights Reserved.
//
// SPDX-License-Identifier: Apache-2.0
//
package accumulator
import (
"errors"
"fmt"
"git.sr.ht/~sircmpwn/go-bare"
"github.com/onsonr/sonr/crypto/core/curves"
)
// SecretKey is the secret alpha only held by the accumulator manager.
type SecretKey struct {
value curves.Scalar
}
// New creates a new secret key from the seed.
func (sk *SecretKey) New(curve *curves.PairingCurve, seed []byte) (*SecretKey, error) {
sk.value = curve.Scalar.Hash(seed)
return sk, nil
}
// GetPublicKey creates a public key from SecretKey sk
func (sk SecretKey) GetPublicKey(curve *curves.PairingCurve) (*PublicKey, error) {
if sk.value == nil || curve == nil {
return nil, fmt.Errorf("curve and sk value cannot be nil")
}
value := curve.Scalar.Point().(curves.PairingPoint).OtherGroup().Generator().Mul(sk.value)
return &PublicKey{value.(curves.PairingPoint)}, nil
}
// MarshalBinary converts SecretKey to bytes
func (sk SecretKey) MarshalBinary() ([]byte, error) {
if sk.value == nil {
return nil, fmt.Errorf("sk cannot be empty")
}
tv := &structMarshal{
Value: sk.value.Bytes(),
Curve: sk.value.Point().CurveName(),
}
return bare.Marshal(tv)
}
// UnmarshalBinary sets SecretKey from bytes
func (sk *SecretKey) UnmarshalBinary(data []byte) error {
tv := new(structMarshal)
err := bare.Unmarshal(data, tv)
if err != nil {
return err
}
curve := curves.GetCurveByName(tv.Curve)
if curve == nil {
return fmt.Errorf("invalid curve")
}
value, err := curve.NewScalar().SetBytes(tv.Value)
if err != nil {
return err
}
sk.value = value
return nil
}
// BatchAdditions computes product(y + sk) for y in additions and output the product
func (sk SecretKey) BatchAdditions(additions []Element) (Element, error) {
if sk.value == nil {
return nil, fmt.Errorf("secret key cannot be empty")
}
mul := sk.value.One()
for i := 0; i < len(additions); i++ {
if additions[i] == nil {
return nil, fmt.Errorf("some element in additions is nil")
}
// y + alpha
temp := additions[i].Add(sk.value)
// prod(y + alpha)
mul = mul.Mul(temp)
}
return mul, nil
}
// BatchDeletions computes 1/product(y + sk) for y in deletions and output it
func (sk SecretKey) BatchDeletions(deletions []Element) (Element, error) {
v, err := sk.BatchAdditions(deletions)
if err != nil {
return nil, err
}
y, err := v.Invert()
if err != nil {
return nil, err
}
return y, nil
}
// CreateCoefficients creates the Batch Polynomial coefficients
// See page 7 of https://eprint.iacr.org/2020/777.pdf
func (sk SecretKey) CreateCoefficients(additions []Element, deletions []Element) ([]Element, error) {
if sk.value == nil {
return nil, fmt.Errorf("secret key should not be nil")
}
// vD(x) = ∑^m_{s=1}{ ∏ 1..s {yD_i + alpha}^-1 ∏ 1..s-1 {yD_j - x} }
one := sk.value.One()
m1 := one.Neg() // m1 is -1
vD := make(polynomial, 0, len(deletions))
for s := 0; s < len(deletions); s++ {
// ∏ 1..s (yD_i + alpha)^-1
c, err := sk.BatchDeletions(deletions[0 : s+1])
if err != nil {
return nil, fmt.Errorf("error in sk batchDeletions")
}
poly := make(polynomial, 1, s+2)
poly[0] = one
// ∏ 1..(s-1) (yD_j - x)
for j := 0; j < s; j++ {
t := make(polynomial, 2)
// yD_j
t[0] = deletions[j]
// -x
t[1] = m1
// polynomial multiplication (yD_1-x) * (yD_2 - x) ...
poly, err = poly.Mul(t)
if err != nil {
return nil, err
}
}
poly, err = poly.MulScalar(c)
if err != nil {
return nil, err
}
vD, err = vD.Add(poly)
if err != nil {
return nil, err
}
}
// vD(x) * ∏ 1..n (yA_i + alpha)
bAdd, err := sk.BatchAdditions(additions)
if err != nil {
return nil, fmt.Errorf("error in sk batchAdditions")
}
vD, err = vD.MulScalar(bAdd)
if err != nil {
return nil, err
}
// vA(x) = ∑^n_{s=1}{ ∏ 1..s-1 {yA_i + alpha} ∏ s+1..n {yA_j - x} }
vA := make(polynomial, 0, len(additions))
for s := 0; s < len(additions); s++ {
// ∏ 1..s-1 {yA_i + alpha}
var c Element
if s == 0 {
c = one
} else {
c, err = sk.BatchAdditions(additions[0:s])
if err != nil {
return nil, err
}
}
poly := make(polynomial, 1, s+2)
poly[0] = one
// ∏ s+1..n {yA_j - x}
for j := s + 1; j < len(additions); j++ {
t := make(polynomial, 2)
t[0] = additions[j]
t[1] = m1
// polynomial multiplication (yA_1-x) * (yA_2 - x) ...
poly, err = poly.Mul(t)
if err != nil {
return nil, err
}
}
poly, err = poly.MulScalar(c)
if err != nil {
return nil, err
}
vA, err = vA.Add(poly)
if err != nil {
return nil, err
}
}
// vA - vD
vA, err = vA.Sub(vD)
if err != nil {
return nil, err
}
result := make([]Element, len(vA))
for i := 0; i < len(vA); i++ {
result[i] = vA[i]
}
return result, nil
}
// PublicKey is the public key of accumulator, it should be sk * generator of G2
type PublicKey struct {
value curves.PairingPoint
}
// MarshalBinary converts PublicKey to bytes
func (pk PublicKey) MarshalBinary() ([]byte, error) {
if pk.value == nil {
return nil, fmt.Errorf("public key cannot be nil")
}
tv := &structMarshal{
Value: pk.value.ToAffineCompressed(),
Curve: pk.value.CurveName(),
}
return bare.Marshal(tv)
}
// UnmarshalBinary sets PublicKey from bytes
func (pk *PublicKey) UnmarshalBinary(data []byte) error {
tv := new(structMarshal)
err := bare.Unmarshal(data, tv)
if err != nil {
return err
}
curve := curves.GetPairingCurveByName(tv.Curve)
if curve == nil {
return fmt.Errorf("invalid curve")
}
value, err := curve.NewScalar().Point().FromAffineCompressed(tv.Value)
if err != nil {
return err
}
var ok bool
pk.value, ok = value.(curves.PairingPoint)
if !ok {
return errors.New("can't convert to PairingPoint")
}
return nil
}

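BatchAdditions and BatchDeletions are multiplicative inverses: prod(y + α) · prod(y + α)^-1 = 1, which is why a batch update that adds and then deletes the same set is a no-op. A sketch of that identity, in the style of TestBatch in the test file below:

```go
package accumulator

import (
	"bytes"
	"testing"

	"github.com/onsonr/sonr/crypto/core/curves"
)

// Sketch: BatchAdditions and BatchDeletions multiply to one.
func TestBatchInverseSketch(t *testing.T) {
	curve := curves.BLS12381(&curves.PointBls12381G1{})
	sk, _ := new(SecretKey).New(curve, []byte("seed"))
	elements := []Element{
		curve.Scalar.Hash([]byte("a")),
		curve.Scalar.Hash([]byte("b")),
	}
	add, _ := sk.BatchAdditions(elements) // prod(y + alpha)
	del, _ := sk.BatchDeletions(elements) // 1/prod(y + alpha)
	if one := add.Mul(del); !bytes.Equal(one.Bytes(), curve.Scalar.One().Bytes()) {
		t.Fatal("add * del should equal one")
	}
}
```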
crypto/accumulator/key_test.go (new executable file, 88 lines)

@ -0,0 +1,88 @@
//
// Copyright Coinbase, Inc. All Rights Reserved.
//
// SPDX-License-Identifier: Apache-2.0
//
package accumulator
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/onsonr/sonr/crypto/core/curves"
)
func TestSecretKeyMarshal(t *testing.T) {
curve := curves.BLS12381(&curves.PointBls12381G1{})
data, err := SecretKey{curve.Scalar.One()}.MarshalBinary()
require.NoError(t, err)
require.NotNil(t, data)
e := &SecretKey{curve.Scalar.New(2)}
err = e.UnmarshalBinary(data)
require.NoError(t, err)
require.Equal(t, e.value.Bytes(), curve.Scalar.One().Bytes())
// element cannot be empty
_, err = SecretKey{}.MarshalBinary()
require.Error(t, err)
}
func TestPublicKeyMarshal(t *testing.T) {
// Actually test both toBytes() and from()
curve := curves.BLS12381(&curves.PointBls12381G1{})
sk := &SecretKey{curve.Scalar.New(3)}
pk, _ := sk.GetPublicKey(curve)
pkBytes, err := pk.MarshalBinary()
require.NoError(t, err)
require.NotNil(t, pkBytes)
pk2 := &PublicKey{}
err = pk2.UnmarshalBinary(pkBytes)
require.NoError(t, err)
require.True(t, pk.value.Equal(pk2.value))
}
func TestBatch(t *testing.T) {
curve := curves.BLS12381(&curves.PointBls12381G1{})
var seed [32]byte
sk, _ := new(SecretKey).New(curve, seed[:])
element1 := curve.Scalar.Hash([]byte("value1"))
element2 := curve.Scalar.Hash([]byte("value2"))
elements := []Element{element1, element2}
add, err := sk.BatchAdditions(elements)
require.NoError(t, err)
require.NotNil(t, add)
del, err := sk.BatchDeletions(elements)
require.NoError(t, err)
require.NotNil(t, del)
result := add.Mul(del)
require.Equal(t, result, curve.Scalar.One())
g1 := curve.PointG1.Generator()
acc := g1.Mul(add)
require.NotEqual(t, acc, g1)
acc = acc.Mul(del)
require.Equal(t, acc.ToAffineCompressed(), g1.ToAffineCompressed())
acc2 := g1.Mul(result)
require.True(t, acc2.Equal(g1))
}
func TestCoefficient(t *testing.T) {
curve := curves.BLS12381(&curves.PointBls12381G1{})
sk, _ := new(SecretKey).New(curve, []byte("1234567890"))
element1 := curve.Scalar.Hash([]byte("value1"))
element2 := curve.Scalar.Hash([]byte("value2"))
element3 := curve.Scalar.Hash([]byte("value3"))
element4 := curve.Scalar.Hash([]byte("value4"))
element5 := curve.Scalar.Hash([]byte("value5"))
elements := []Element{element1, element2, element3, element4, element5}
coefficients, err := sk.CreateCoefficients(elements[0:2], elements[2:5])
require.NoError(t, err)
require.Equal(t, len(coefficients), 3)
}

crypto/accumulator/lib.go (new executable file, 204 lines)

@ -0,0 +1,204 @@
//
// Copyright Coinbase, Inc. All Rights Reserved.
//
// SPDX-License-Identifier: Apache-2.0
//
package accumulator
import (
"fmt"
"math"
"github.com/onsonr/sonr/crypto/core/curves"
)
// dad constructs two polynomials - dA(x) and dD(x)
// dA(y) = prod(y_A,t - y), t = 1...n
// dD(y) = prod(y_D,t - y), t = 1...n
func dad(values []Element, y Element) (Element, error) {
if values == nil || y == nil {
return nil, fmt.Errorf("curve, values or y should not be nil")
}
for _, value := range values {
if value == nil {
return nil, fmt.Errorf("some element is nil")
}
}
result := y.One()
if len(values) == 1 {
a := values[0]
result = a.Sub(y)
} else {
for i := 0; i < len(values); i++ {
temp := values[i].Sub(y)
result = result.Mul(temp)
}
}
return result, nil
}
type polynomialPoint []curves.Point
// evaluate evaluates a PolynomialG1 on input x.
func (p polynomialPoint) evaluate(x curves.Scalar) (curves.Point, error) {
if p == nil {
return nil, fmt.Errorf("p cannot be empty")
}
for i := 0; i < len(p); i++ {
if p[i] == nil {
return nil, fmt.Errorf("some coefficient in p is nil")
}
}
pp := x
res := p[0]
for i := 1; i < len(p); i++ {
r := p[i].Mul(pp)
res = res.Add(r)
pp = pp.Mul(x)
}
return res, nil
}
// Add adds two polynomialPoint values
func (p polynomialPoint) Add(rhs polynomialPoint) (polynomialPoint, error) {
maxLen := int(math.Max(float64(len(p)), float64(len(rhs))))
result := make(polynomialPoint, maxLen)
for i, c := range p {
if c == nil {
return nil, fmt.Errorf("invalid coefficient at %d", i)
}
result[i] = c.Add(c.Identity())
}
for i, c := range rhs {
if c == nil {
return nil, fmt.Errorf("invalid coefficient at %d", i)
}
if result[i] == nil {
result[i] = c.Add(c.Identity())
} else {
result[i] = result[i].Add(c)
}
}
return result, nil
}
// Mul for polynomialPoint computes p * rhs, where p is a polynomial of points and rhs is a scalar
func (p polynomialPoint) Mul(rhs curves.Scalar) (polynomialPoint, error) {
result := make(polynomialPoint, len(p))
for i, c := range p {
if c == nil {
return nil, fmt.Errorf("invalid coefficient at %d", i)
}
result[i] = c.Mul(rhs)
}
return result, nil
}
type polynomial []curves.Scalar
// Add adds two polynomials
func (p polynomial) Add(rhs polynomial) (polynomial, error) {
maxLen := int(math.Max(float64(len(p)), float64(len(rhs))))
result := make([]curves.Scalar, maxLen)
for i, c := range p {
if c == nil {
return nil, fmt.Errorf("invalid coefficient at %d", i)
}
result[i] = c.Clone()
}
for i, c := range rhs {
if c == nil {
return nil, fmt.Errorf("invalid coefficient at %d", i)
}
if result[i] == nil {
result[i] = c.Clone()
} else {
result[i] = result[i].Add(c)
}
}
return result, nil
}
// Sub computes p-rhs and returns
func (p polynomial) Sub(rhs polynomial) (polynomial, error) {
maxLen := int(math.Max(float64(len(p)), float64(len(rhs))))
result := make([]curves.Scalar, maxLen)
for i, c := range p {
if c == nil {
return nil, fmt.Errorf("invalid coefficient at %d", i)
}
result[i] = c.Clone()
}
for i, c := range rhs {
if c == nil {
return nil, fmt.Errorf("invalid coefficient at %d", i)
}
if result[i] == nil {
result[i] = c.Neg()
} else {
result[i] = result[i].Sub(c)
}
}
return result, nil
}
// Mul multiplies two polynomials - p * rhs
func (p polynomial) Mul(rhs polynomial) (polynomial, error) {
// Check that no coefficient in either polynomial is nil
for i, c := range p {
if c == nil {
return nil, fmt.Errorf("coefficient in p at %d is nil", i)
}
}
for i, c := range rhs {
if c == nil {
return nil, fmt.Errorf("coefficient in rhs at %d is nil", i)
}
}
m := len(p)
n := len(rhs)
// Initialize the product polynomial
prod := make(polynomial, m+n-1)
for i := 0; i < len(prod); i++ {
prod[i] = p[0].Zero()
}
// Multiply two polynomials term by term
for i, cp := range p {
for j, cr := range rhs {
temp := cp.Mul(cr)
prod[i+j] = prod[i+j].Add(temp)
}
}
return prod, nil
}
// MulScalar computes p * rhs, where rhs is a scalar value
func (p polynomial) MulScalar(rhs curves.Scalar) (polynomial, error) {
result := make(polynomial, len(p))
for i, c := range p {
if c == nil {
return nil, fmt.Errorf("coefficient at %d is nil", i)
}
result[i] = c.Mul(rhs)
}
return result, nil
}

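polynomial.Mul is a plain O(m·n) coefficient convolution: for example, (3 + 2x)(1 + x) = 3 + 5x + 2x². A sketch checking exactly that, using the scalar field from the tests below:

```go
package accumulator

import (
	"bytes"
	"testing"

	"github.com/onsonr/sonr/crypto/core/curves"
)

// Sketch: (3 + 2x) * (1 + x) = 3 + 5x + 2x^2.
func TestPolynomialMulSketch(t *testing.T) {
	curve := curves.BLS12381(&curves.PointBls12381G1{})
	p := polynomial{curve.Scalar.New(3), curve.Scalar.New(2)} // 3 + 2x
	q := polynomial{curve.Scalar.New(1), curve.Scalar.New(1)} // 1 + x
	prod, err := p.Mul(q)
	if err != nil {
		t.Fatal(err)
	}
	want := polynomial{curve.Scalar.New(3), curve.Scalar.New(5), curve.Scalar.New(2)}
	for i := range want {
		if !bytes.Equal(prod[i].Bytes(), want[i].Bytes()) {
			t.Fatalf("coefficient %d mismatch", i)
		}
	}
}
```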
crypto/accumulator/lib_test.go (new executable file, 404 lines)

@ -0,0 +1,404 @@
//
// Copyright Coinbase, Inc. All Rights Reserved.
//
// SPDX-License-Identifier: Apache-2.0
//
package accumulator
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/onsonr/sonr/crypto/core/curves"
)
func TestEvaluatePolyG1(t *testing.T) {
curve := curves.BLS12381(&curves.PointBls12381G1{})
poly := polynomialPoint{
curve.PointG1.Generator().Mul(curve.Scalar.New(3)),
curve.PointG1.Generator().Mul(curve.Scalar.New(2)),
curve.PointG1.Generator().Mul(curve.Scalar.New(1)),
}
output1, err := poly.evaluate(curve.Scalar.New(1))
require.NoError(t, err)
require.NotNil(t, output1)
result1 := curve.PointG1.Generator().Mul(curve.Scalar.New(6))
require.Equal(t, output1.ToAffineCompressed(), result1.ToAffineCompressed())
output2, err := poly.evaluate(curve.Scalar.New(2))
require.NoError(t, err)
require.NotNil(t, output2)
result2 := curve.PointG1.Generator().Mul(curve.Scalar.New(11))
require.Equal(t, output2.ToAffineCompressed(), result2.ToAffineCompressed())
}
func TestEvaluatePolyG1Error(t *testing.T) {
curve := curves.BLS12381(&curves.PointBls12381G1{})
poly := polynomialPoint{
nil,
curve.PointG1.Generator().Mul(curve.Scalar.New(2)),
curve.PointG1.Generator().Mul(curve.Scalar.New(1)),
}
_, err := poly.evaluate(curve.Scalar.New(1))
require.Error(t, err)
}
func TestAddAssignPolyG1(t *testing.T) {
curve := curves.BLS12381(&curves.PointBls12381G1{})
// Test polynomial with equal length
poly1 := polynomialPoint{
curve.PointG1.Generator().Mul(curve.Scalar.New(3)),
curve.PointG1.Generator().Mul(curve.Scalar.New(2)),
curve.PointG1.Generator().Mul(curve.Scalar.New(1)),
}
poly2 := polynomialPoint{
curve.PointG1.Generator().Mul(curve.Scalar.New(1)),
curve.PointG1.Generator().Mul(curve.Scalar.New(2)),
curve.PointG1.Generator().Mul(curve.Scalar.New(3)),
}
output, err := poly1.Add(poly2)
require.NoError(t, err)
require.NotNil(t, output)
result := polynomialPoint{
curve.PointG1.Generator().Mul(curve.Scalar.New(4)),
curve.PointG1.Generator().Mul(curve.Scalar.New(4)),
curve.PointG1.Generator().Mul(curve.Scalar.New(4)),
}
for i := 0; i < len(output); i++ {
require.Equal(t, output[i].ToAffineCompressed(), result[i].ToAffineCompressed())
}
// Test polynomials with unequal length
poly3 := polynomialPoint{
curve.PointG1.Generator().Mul(curve.Scalar.New(1)),
curve.PointG1.Generator().Mul(curve.Scalar.New(2)),
}
output2, err := poly1.Add(poly3)
require.NoError(t, err)
require.NotNil(t, output2)
result2 := polynomialPoint{
curve.PointG1.Generator().Mul(curve.Scalar.New(4)),
curve.PointG1.Generator().Mul(curve.Scalar.New(4)),
curve.PointG1.Generator().Mul(curve.Scalar.New(1)),
}
require.Equal(t, len(output2), len(result2))
for i := 0; i < len(output2); i++ {
require.Equal(t, output2[i].ToAffineCompressed(), result2[i].ToAffineCompressed())
}
// Test polynomial with Capacity
poly4 := make(polynomialPoint, 0, 3)
poly5, err := poly4.Add(poly1)
require.NoError(t, err)
require.Equal(t, len(poly5), len(poly1))
for i := 0; i < len(poly5); i++ {
require.Equal(t, poly5[i].ToAffineCompressed(), poly1[i].ToAffineCompressed())
}
}
func TestAddAssignPolyG1Error(t *testing.T) {
curve := curves.BLS12381(&curves.PointBls12381G1{})
poly1 := polynomialPoint{
nil,
curve.PointG1.Generator().Mul(curve.Scalar.New(2)),
curve.PointG1.Generator().Mul(curve.Scalar.New(1)),
}
poly2 := polynomialPoint{
curve.PointG1.Generator().Mul(curve.Scalar.New(1)),
curve.PointG1.Generator().Mul(curve.Scalar.New(2)),
curve.PointG1.Generator().Mul(curve.Scalar.New(3)),
}
output, err := poly1.Add(poly2)
require.Error(t, err)
require.Nil(t, output)
}
func TestMulAssignPolyG1(t *testing.T) {
curve := curves.BLS12381(&curves.PointBls12381G1{})
poly := polynomialPoint{
curve.PointG1.Generator().Mul(curve.Scalar.New(3)),
curve.PointG1.Generator().Mul(curve.Scalar.New(2)),
curve.PointG1.Generator().Mul(curve.Scalar.New(1)),
}
rhs := curve.Scalar.New(3)
output, err := poly.Mul(rhs)
require.NoError(t, err)
require.NotNil(t, output)
poly2 := polynomialPoint{
curve.PointG1.Generator().Mul(curve.Scalar.New(9)),
curve.PointG1.Generator().Mul(curve.Scalar.New(6)),
curve.PointG1.Generator().Mul(curve.Scalar.New(3)),
}
for i := 0; i < len(poly2); i++ {
require.Equal(t, output[i].ToAffineCompressed(), poly2[i].ToAffineCompressed())
}
}
func TestMulAssignPolyG1Error(t *testing.T) {
curve := curves.BLS12381(&curves.PointBls12381G1{})
poly := polynomialPoint{
nil,
curve.PointG1.Generator().Mul(curve.Scalar.New(2)),
curve.PointG1.Generator().Mul(curve.Scalar.New(1)),
}
rhs := curve.Scalar.New(3)
output, err := poly.Mul(rhs)
require.Error(t, err)
require.Nil(t, output)
}
func TestPushPoly(t *testing.T) {
curve := curves.BLS12381(&curves.PointBls12381G1{})
poly := polynomial{
curve.Scalar.New(3),
curve.Scalar.New(2),
curve.Scalar.New(1),
}
scalar := curve.Scalar.New(4)
result := append(poly, scalar)
require.Equal(t, result[3], scalar)
// Push one more
scalar2 := curve.Scalar.New(5)
result2 := append(result, scalar2)
require.Equal(t, result2[4], scalar2)
// Push to a new polynomial
newPoly := polynomial{}
newPoly = append(newPoly, scalar)
require.Equal(t, newPoly[0], scalar)
newPoly = append(newPoly, scalar2)
require.Equal(t, newPoly[1], scalar2)
}
func TestAddAssignPoly(t *testing.T) {
curve := curves.BLS12381(&curves.PointBls12381G1{})
// Test polynomial with equal length
poly1 := polynomial{
curve.Scalar.New(3),
curve.Scalar.New(2),
curve.Scalar.New(1),
}
poly2 := polynomial{
curve.Scalar.New(1),
curve.Scalar.New(2),
curve.Scalar.New(3),
}
output, err := poly1.Add(poly2)
require.NoError(t, err)
require.NotNil(t, output)
result := []curves.Scalar{
curve.Scalar.New(4),
curve.Scalar.New(4),
curve.Scalar.New(4),
}
for i := 0; i < len(output); i++ {
require.Equal(t, output[i], result[i])
}
// Test polynomials with unequal length
poly3 := polynomial{
curve.Scalar.New(1),
curve.Scalar.New(2),
}
output2, err := poly1.Add(poly3)
require.NoError(t, err)
require.NotNil(t, output2)
result2 := []curves.Scalar{
curve.Scalar.New(4),
curve.Scalar.New(4),
curve.Scalar.New(1),
}
require.Equal(t, len(output2), len(result2))
for i := 0; i < len(output2); i++ {
require.Equal(t, output2[i], result2[i])
}
}
func TestAddAssignPolyError(t *testing.T) {
curve := curves.BLS12381(&curves.PointBls12381G1{})
// Test polynomial with equal length
poly1 := polynomial{
nil,
curve.Scalar.New(2),
curve.Scalar.New(1),
}
poly2 := polynomial{
curve.Scalar.New(1),
curve.Scalar.New(2),
curve.Scalar.New(3),
}
output, err := poly1.Add(poly2)
require.Error(t, err)
require.Nil(t, output)
}
func TestSubAssignPoly(t *testing.T) {
curve := curves.BLS12381(&curves.PointBls12381G1{})
// Test polynomial with equal length
poly1 := polynomial{
curve.Scalar.New(3),
curve.Scalar.New(2),
curve.Scalar.New(1),
}
poly2 := polynomial{
curve.Scalar.New(1),
curve.Scalar.New(2),
curve.Scalar.New(3),
}
output, err := poly1.Sub(poly2)
require.NoError(t, err)
require.NotNil(t, output)
result := []curves.Scalar{
curve.Scalar.New(2),
curve.Scalar.New(0),
curve.Scalar.New(-2),
}
for i := 0; i < len(output); i++ {
require.Equal(t, output[i].Bytes(), result[i].Bytes())
}
// Test polynomials with unequal length
poly3 := polynomial{
curve.Scalar.New(1),
curve.Scalar.New(2),
curve.Scalar.New(3),
curve.Scalar.New(4),
}
output2, err := poly1.Sub(poly3)
require.NoError(t, err)
require.NotNil(t, output2)
result2 := []curves.Scalar{
curve.Scalar.New(2),
curve.Scalar.New(0),
curve.Scalar.New(-2),
curve.Scalar.New(-4),
}
require.Equal(t, len(output2), len(result2))
for i := 0; i < len(output2); i++ {
require.Equal(t, output2[i].Bytes(), result2[i].Bytes())
}
}
func TestSubAssignPolyError(t *testing.T) {
curve := curves.BLS12381(&curves.PointBls12381G1{})
poly1 := polynomial{
nil,
curve.Scalar.New(2),
curve.Scalar.New(1),
}
poly2 := polynomial{
curve.Scalar.New(1),
curve.Scalar.New(2),
curve.Scalar.New(3),
}
output, err := poly1.Sub(poly2)
require.Error(t, err)
require.Nil(t, output)
}
func TestMulAssignPoly(t *testing.T) {
curve := curves.BLS12381(&curves.PointBls12381G1{})
// Test polynomial with equal length
poly1 := polynomial{
curve.Scalar.New(3),
curve.Scalar.New(2),
curve.Scalar.New(1),
}
poly2 := polynomial{
curve.Scalar.New(1),
curve.Scalar.New(2),
curve.Scalar.New(3),
}
output, err := poly1.Mul(poly2)
require.NoError(t, err)
require.NotNil(t, output)
result := []curves.Scalar{
curve.Scalar.New(3),
curve.Scalar.New(8),
curve.Scalar.New(14),
curve.Scalar.New(8),
curve.Scalar.New(3),
}
for i := 0; i < len(result); i++ {
require.Equal(t, output[i].Bytes(), result[i].Bytes())
}
// Test polynomials with unequal length
poly3 := polynomial{
curve.Scalar.New(1),
curve.Scalar.New(2),
}
output2, err := poly1.Mul(poly3)
require.NoError(t, err)
require.NotNil(t, output2)
result2 := []curves.Scalar{
curve.Scalar.New(3),
curve.Scalar.New(8),
curve.Scalar.New(5),
curve.Scalar.New(2),
}
require.Equal(t, len(output2), 4)
for i := 0; i < len(output2); i++ {
require.Equal(t, output2[i].Bytes(), result2[i].Bytes())
}
}
func TestMulAssignPolyError(t *testing.T) {
curve := curves.BLS12381(&curves.PointBls12381G1{})
poly1 := polynomial{
nil,
curve.Scalar.New(2),
curve.Scalar.New(1),
}
poly2 := polynomial{
curve.Scalar.New(1),
curve.Scalar.New(2),
curve.Scalar.New(3),
}
output, err := poly1.Mul(poly2)
require.Error(t, err)
require.Nil(t, output)
}
func TestMulValueAssignPoly(t *testing.T) {
curve := curves.BLS12381(&curves.PointBls12381G1{})
poly := polynomial{
curve.Scalar.New(3),
curve.Scalar.New(2),
curve.Scalar.New(1),
}
rhs := curve.Scalar.New(3)
output, err := poly.MulScalar(rhs)
require.NoError(t, err)
require.NotNil(t, output)
coefficients2 := []curves.Scalar{
curve.Scalar.New(9),
curve.Scalar.New(6),
curve.Scalar.New(3),
}
for i := 0; i < len(coefficients2); i++ {
require.Equal(t, output[i].Bytes(), coefficients2[i].Bytes())
}
}
func TestMulValueAssignPolyError(t *testing.T) {
curve := curves.BLS12381(&curves.PointBls12381G1{})
poly := polynomial{
nil,
curve.Scalar.New(2),
curve.Scalar.New(1),
}
rhs := curve.Scalar.New(3)
output, err := poly.MulScalar(rhs)
require.Error(t, err)
require.Nil(t, output)
}

crypto/accumulator/proof.go Executable file

@ -0,0 +1,518 @@
//
// Copyright Coinbase, Inc. All Rights Reserved.
//
// SPDX-License-Identifier: Apache-2.0
//
package accumulator
import (
"bytes"
crand "crypto/rand"
"errors"
"fmt"
"git.sr.ht/~sircmpwn/go-bare"
"github.com/onsonr/sonr/crypto/core/curves"
)
type proofParamsMarshal struct {
X []byte `bare:"x"`
Y []byte `bare:"y"`
Z []byte `bare:"z"`
Curve string `bare:"curve"`
}
// ProofParams contains three distinct public generators of G1: X, Y, Z
type ProofParams struct {
x, y, z curves.Point
}
// New samples X, Y, Z
func (p *ProofParams) New(curve *curves.PairingCurve, pk *PublicKey, entropy []byte) (*ProofParams, error) {
pkBytes, err := pk.MarshalBinary()
if err != nil {
return nil, err
}
prefix := bytes.Repeat([]byte{0xFF}, 32)
data := append(prefix, entropy...)
data = append(data, pkBytes...)
p.z = curve.Scalar.Point().Hash(data)
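// Vary the leading prefix byte (0xFF -> 0xFE -> 0xFD) to domain-separate the three generator hashes.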
data[0] = 0xFE
p.y = curve.Scalar.Point().Hash(data)
data[0] = 0xFD
p.x = curve.Scalar.Point().Hash(data)
return p, nil
}
// MarshalBinary converts ProofParams to bytes
func (p *ProofParams) MarshalBinary() ([]byte, error) {
if p.x == nil || p.y == nil || p.z == nil {
return nil, fmt.Errorf("some value x, y, or z is nil")
}
tv := &proofParamsMarshal{
X: p.x.ToAffineCompressed(),
Y: p.y.ToAffineCompressed(),
Z: p.z.ToAffineCompressed(),
Curve: p.x.CurveName(),
}
return bare.Marshal(tv)
}
// UnmarshalBinary converts bytes to ProofParams
func (p *ProofParams) UnmarshalBinary(data []byte) error {
if data == nil {
return fmt.Errorf("expected non-zero byte sequence")
}
tv := new(proofParamsMarshal)
err := bare.Unmarshal(data, tv)
if err != nil {
return err
}
curve := curves.GetCurveByName(tv.Curve)
if curve == nil {
return fmt.Errorf("invalid curve")
}
x, err := curve.NewIdentityPoint().FromAffineCompressed(tv.X)
if err != nil {
return err
}
y, err := curve.NewIdentityPoint().FromAffineCompressed(tv.Y)
if err != nil {
return err
}
z, err := curve.NewIdentityPoint().FromAffineCompressed(tv.Z)
if err != nil {
return err
}
p.x = x
p.y = y
p.z = z
return nil
}
// MembershipProofCommitting contains values computed in the Proof of Knowledge and
// Blinding phases as described in section 7 of https://eprint.iacr.org/2020/777.pdf
type MembershipProofCommitting struct {
eC curves.Point
tSigma curves.Point
tRho curves.Point
deltaSigma curves.Scalar
deltaRho curves.Scalar
blindingFactor curves.Scalar
rSigma curves.Scalar
rRho curves.Scalar
rDeltaSigma curves.Scalar
rDeltaRho curves.Scalar
sigma curves.Scalar
rho curves.Scalar
capRSigma curves.Point
capRRho curves.Point
capRDeltaSigma curves.Point
capRDeltaRho curves.Point
capRE curves.Scalar
accumulator curves.Point
witnessValue curves.Scalar
xG1 curves.Point
yG1 curves.Point
zG1 curves.Point
}
// New initializes the values of MembershipProofCommitting
func (mpc *MembershipProofCommitting) New(
witness *MembershipWitness,
acc *Accumulator,
pp *ProofParams,
pk *PublicKey,
) (*MembershipProofCommitting, error) {
// Randomly select σ, ρ
sigma := witness.y.Random(crand.Reader)
rho := witness.y.Random(crand.Reader)
// E_C = C + (σ + ρ)Z
t := sigma
t = t.Add(rho)
eC := pp.z
eC = eC.Mul(t)
eC = eC.Add(witness.c)
// T_σ = σX
tSigma := pp.x
tSigma = tSigma.Mul(sigma)
// T_ρ = ρY
tRho := pp.y
tRho = tRho.Mul(rho)
// δ_σ = yσ
deltaSigma := witness.y
deltaSigma = deltaSigma.Mul(sigma)
// δ_ρ = yρ
deltaRho := witness.y
deltaRho = deltaRho.Mul(rho)
// Randomly pick r_y, r_σ, r_ρ, r_δσ, r_δρ
rY := witness.y.Random(crand.Reader)
rSigma := witness.y.Random(crand.Reader)
rRho := witness.y.Random(crand.Reader)
rDeltaSigma := witness.y.Random(crand.Reader)
rDeltaRho := witness.y.Random(crand.Reader)
// R_σ = r_σ X
capRSigma := pp.x
capRSigma = capRSigma.Mul(rSigma)
// R_ρ = r_ρ Y
capRRho := pp.y
capRRho = capRRho.Mul(rRho)
// R_δσ = r_y T_σ - r_δσ X
negX := pp.x
negX = negX.Neg()
capRDeltaSigma := tSigma.Mul(rY)
capRDeltaSigma = capRDeltaSigma.Add(negX.Mul(rDeltaSigma))
// R_δρ = r_y T_ρ - r_δρ Y
negY := pp.y
negY = negY.Neg()
capRDeltaRho := tRho.Mul(rY)
capRDeltaRho = capRDeltaRho.Add(negY.Mul(rDeltaRho))
// P~
g2 := pk.value.Generator()
// -r_δσ - r_δρ
exp := rDeltaSigma
exp = exp.Add(rDeltaRho)
exp = exp.Neg()
// -r_σ - r_ρ
exp2 := rSigma
exp2 = exp2.Add(rRho)
exp2 = exp2.Neg()
// rY * eC
rYeC := eC.Mul(rY)
// (-r_δσ - r_δρ)*Z
expZ := pp.z.Mul(exp)
// (-r_σ - r_ρ)*Z
exp2Z := pp.z.Mul(exp2)
// Prepare
rYeCPrep, ok := rYeC.(curves.PairingPoint)
if !ok {
return nil, errors.New("incorrect type conversion")
}
g2Prep, ok := g2.(curves.PairingPoint)
if !ok {
return nil, errors.New("incorrect type conversion")
}
expZPrep, ok := expZ.(curves.PairingPoint)
if !ok {
return nil, errors.New("incorrect type conversion")
}
exp2ZPrep, ok := exp2Z.(curves.PairingPoint)
if !ok {
return nil, errors.New("incorrect type conversion")
}
pkPrep := pk.value
// Pairing
capRE := g2Prep.MultiPairing(rYeCPrep, g2Prep, expZPrep, g2Prep, exp2ZPrep, pkPrep)
return &MembershipProofCommitting{
eC,
tSigma,
tRho,
deltaSigma,
deltaRho,
rY,
rSigma,
rRho,
rDeltaSigma,
rDeltaRho,
sigma,
rho,
capRSigma,
capRRho,
capRDeltaSigma,
capRDeltaRho,
capRE,
acc.value,
witness.y,
pp.x,
pp.y,
pp.z,
}, nil
}
// GetChallengeBytes returns the bytes that need to be hashed to generate the challenge.
// V || Ec || T_sigma || T_rho || R_E || R_sigma || R_rho || R_delta_sigma || R_delta_rho
func (mpc MembershipProofCommitting) GetChallengeBytes() []byte {
res := mpc.accumulator.ToAffineCompressed()
res = append(res, mpc.eC.ToAffineCompressed()...)
res = append(res, mpc.tSigma.ToAffineCompressed()...)
res = append(res, mpc.tRho.ToAffineCompressed()...)
res = append(res, mpc.capRE.Bytes()...)
res = append(res, mpc.capRSigma.ToAffineCompressed()...)
res = append(res, mpc.capRRho.ToAffineCompressed()...)
res = append(res, mpc.capRDeltaSigma.ToAffineCompressed()...)
res = append(res, mpc.capRDeltaRho.ToAffineCompressed()...)
return res
}
// GenProof computes the s values for Fiat-Shamir and returns the actual
// proof to be sent to the verifier given the challenge c.
func (mpc *MembershipProofCommitting) GenProof(c curves.Scalar) *MembershipProof {
// s_y = r_y + c*y
sY := schnorr(mpc.blindingFactor, mpc.witnessValue, c)
// s_σ = r_σ + c*σ
sSigma := schnorr(mpc.rSigma, mpc.sigma, c)
// s_ρ = r_ρ + c*ρ
sRho := schnorr(mpc.rRho, mpc.rho, c)
// s_δσ = rδσ + c*δ_σ
sDeltaSigma := schnorr(mpc.rDeltaSigma, mpc.deltaSigma, c)
// s_δρ = rδρ + c*δ_ρ
sDeltaRho := schnorr(mpc.rDeltaRho, mpc.deltaRho, c)
return &MembershipProof{
mpc.eC,
mpc.tSigma,
mpc.tRho,
sSigma,
sRho,
sDeltaSigma,
sDeltaRho,
sY,
}
}
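// schnorr computes the Fiat-Shamir response s = r + c*v for secret value v, nonce r, and challenge c.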
func schnorr(r, v, challenge curves.Scalar) curves.Scalar {
res := v
res = res.Mul(challenge)
res = res.Add(r)
return res
}
type membershipProofMarshal struct {
EC []byte `bare:"e_c"`
TSigma []byte `bare:"t_sigma"`
TRho []byte `bare:"t_rho"`
SSigma []byte `bare:"s_sigma"`
SRho []byte `bare:"s_rho"`
SDeltaSigma []byte `bare:"s_delta_sigma"`
SDeltaRho []byte `bare:"s_delta_rho"`
SY []byte `bare:"s_y"`
Curve string `bare:"curve"`
}
// MembershipProof contains values in the proof to be verified
type MembershipProof struct {
eC curves.Point
tSigma curves.Point
tRho curves.Point
sSigma curves.Scalar
sRho curves.Scalar
sDeltaSigma curves.Scalar
sDeltaRho curves.Scalar
sY curves.Scalar
}
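A brief aside (not part of the diff): Finalize below can recompute the prover's commitment R_σ from public values alone because, with T_σ = σX and the Schnorr response s_σ = r_σ + c·σ,

```latex
s_\sigma X - c\,T_\sigma = (r_\sigma + c\sigma)X - c\sigma X = r_\sigma X = R_\sigma
```

The R_ρ, R_δσ, R_δρ, and pairing R_E recomputations cancel the same way.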
// Finalize computes values in the proof to be verified.
func (mp *MembershipProof) Finalize(acc *Accumulator, pp *ProofParams, pk *PublicKey, challenge curves.Scalar) (*MembershipProofFinal, error) {
// R_σ = s_σ X - c T_σ
negTSigma := mp.tSigma
negTSigma = negTSigma.Neg()
capRSigma := pp.x.Mul(mp.sSigma)
capRSigma = capRSigma.Add(negTSigma.Mul(challenge))
// R_ρ = s_ρ Y - c T_ρ
negTRho := mp.tRho
negTRho = negTRho.Neg()
capRRho := pp.y.Mul(mp.sRho)
capRRho = capRRho.Add(negTRho.Mul(challenge))
// R_δσ = s_y T_σ - s_δσ X
negX := pp.x
negX = negX.Neg()
capRDeltaSigma := mp.tSigma.Mul(mp.sY)
capRDeltaSigma = capRDeltaSigma.Add(negX.Mul(mp.sDeltaSigma))
// R_δρ = s_y T_ρ - s_δρ Y
negY := pp.y
negY = negY.Neg()
capRDeltaRho := mp.tRho.Mul(mp.sY)
capRDeltaRho = capRDeltaRho.Add(negY.Mul(mp.sDeltaRho))
// tildeP
g2 := pk.value.Generator()
// Compute capRE, the pairing
// E_c * s_y
eCsY := mp.eC.Mul(mp.sY)
// (-s_delta_sigma - s_delta_rho) * Z
exp := mp.sDeltaSigma
exp = exp.Add(mp.sDeltaRho)
exp = exp.Neg()
expZ := pp.z.Mul(exp)
// (-c) * V
exp = challenge.Neg()
expV := acc.value.Mul(exp)
// E_c * s_y + (-s_delta_sigma - s_delta_rho) * Z + (-c) * V
lhs := eCsY.Add(expZ).Add(expV)
// (-s_sigma - s_rho) * Z
exp = mp.sSigma
exp = exp.Add(mp.sRho)
exp = exp.Neg()
expZ2 := pp.z.Mul(exp)
// E_c * c
cEc := mp.eC.Mul(challenge)
// (-s_sigma - s_rho) * Z + E_c * c
rhs := cEc.Add(expZ2)
// Prepare
lhsPrep, ok := lhs.(curves.PairingPoint)
if !ok {
return nil, errors.New("incorrect type conversion")
}
g2Prep, ok := g2.(curves.PairingPoint)
if !ok {
return nil, errors.New("incorrect type conversion")
}
rhsPrep, ok := rhs.(curves.PairingPoint)
if !ok {
return nil, errors.New("incorrect type conversion")
}
pkPrep := pk.value
// capRE
capRE := g2Prep.MultiPairing(lhsPrep, g2Prep, rhsPrep, pkPrep)
return &MembershipProofFinal{
acc.value,
mp.eC,
mp.tSigma,
mp.tRho,
capRE,
capRSigma,
capRRho,
capRDeltaSigma,
capRDeltaRho,
}, nil
}
// MarshalBinary converts MembershipProof to bytes
func (mp MembershipProof) MarshalBinary() ([]byte, error) {
tv := &membershipProofMarshal{
EC: mp.eC.ToAffineCompressed(),
TSigma: mp.tSigma.ToAffineCompressed(),
TRho: mp.tRho.ToAffineCompressed(),
SSigma: mp.sSigma.Bytes(),
SRho: mp.sRho.Bytes(),
SDeltaSigma: mp.sDeltaSigma.Bytes(),
SDeltaRho: mp.sDeltaRho.Bytes(),
SY: mp.sY.Bytes(),
Curve: mp.eC.CurveName(),
}
return bare.Marshal(tv)
}
// UnmarshalBinary converts bytes to MembershipProof
func (mp *MembershipProof) UnmarshalBinary(data []byte) error {
if data == nil {
return fmt.Errorf("expected non-zero byte sequence")
}
tv := new(membershipProofMarshal)
err := bare.Unmarshal(data, tv)
if err != nil {
return err
}
curve := curves.GetCurveByName(tv.Curve)
if curve == nil {
return fmt.Errorf("invalid curve")
}
eC, err := curve.NewIdentityPoint().FromAffineCompressed(tv.EC)
if err != nil {
return err
}
tSigma, err := curve.NewIdentityPoint().FromAffineCompressed(tv.TSigma)
if err != nil {
return err
}
tRho, err := curve.NewIdentityPoint().FromAffineCompressed(tv.TRho)
if err != nil {
return err
}
sSigma, err := curve.NewScalar().SetBytes(tv.SSigma)
if err != nil {
return err
}
sRho, err := curve.NewScalar().SetBytes(tv.SRho)
if err != nil {
return err
}
sDeltaSigma, err := curve.NewScalar().SetBytes(tv.SDeltaSigma)
if err != nil {
return err
}
sDeltaRho, err := curve.NewScalar().SetBytes(tv.SDeltaRho)
if err != nil {
return err
}
sY, err := curve.NewScalar().SetBytes(tv.SY)
if err != nil {
return err
}
mp.eC = eC
mp.tSigma = tSigma
mp.tRho = tRho
mp.sSigma = sSigma
mp.sRho = sRho
mp.sDeltaSigma = sDeltaSigma
mp.sDeltaRho = sDeltaRho
mp.sY = sY
return nil
}
// MembershipProofFinal contains the values that are inputs to the Fiat-Shamir heuristic
type MembershipProofFinal struct {
accumulator curves.Point
eC curves.Point
tSigma curves.Point
tRho curves.Point
capRE curves.Scalar
capRSigma curves.Point
capRRho curves.Point
capRDeltaSigma curves.Point
capRDeltaRho curves.Point
}
// GetChallenge computes the Fiat-Shamir heuristic challenge over the values of MembershipProofFinal
func (m MembershipProofFinal) GetChallenge(curve *curves.PairingCurve) curves.Scalar {
res := m.accumulator.ToAffineCompressed()
res = append(res, m.eC.ToAffineCompressed()...)
res = append(res, m.tSigma.ToAffineCompressed()...)
res = append(res, m.tRho.ToAffineCompressed()...)
res = append(res, m.capRE.Bytes()...)
res = append(res, m.capRSigma.ToAffineCompressed()...)
res = append(res, m.capRRho.ToAffineCompressed()...)
res = append(res, m.capRDeltaSigma.ToAffineCompressed()...)
res = append(res, m.capRDeltaRho.ToAffineCompressed()...)
challenge := curve.Scalar.Hash(res)
return challenge
}
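Before the test file, here is a condensed, hypothetical sketch of the round trip the tests below exercise (it assumes a curve, key pair sk/pk, accumulator acc, and witness wit constructed as in the files above; error handling elided):

```go
// Minimal membership-proof round trip (names taken from this package).
params, _ := new(ProofParams).New(curve, pk, []byte("entropy"))
mpc, _ := new(MembershipProofCommitting).New(wit, acc, params, pk)
challenge := curve.Scalar.Hash(mpc.GetChallengeBytes()) // Fiat-Shamir challenge
proof := mpc.GenProof(challenge)
final, _ := proof.Finalize(acc, params, pk, challenge)
// The proof verifies iff the recomputed challenge matches the original.
valid := challenge.Cmp(final.GetChallenge(curve)) == 0
_ = valid
```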

crypto/accumulator/proof_test.go Executable file

@ -0,0 +1,182 @@
//
// Copyright Coinbase, Inc. All Rights Reserved.
//
// SPDX-License-Identifier: Apache-2.0
//
package accumulator
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/onsonr/sonr/crypto/core/curves"
)
func TestProofParamsMarshal(t *testing.T) {
curve := curves.BLS12381(&curves.PointBls12381G1{})
sk, _ := new(SecretKey).New(curve, []byte("1234567890"))
pk, _ := sk.GetPublicKey(curve)
params, err := new(ProofParams).New(curve, pk, []byte("entropy"))
require.NoError(t, err)
require.NotNil(t, params.x)
require.NotNil(t, params.y)
require.NotNil(t, params.z)
bytes, err := params.MarshalBinary()
require.NoError(t, err)
require.NotNil(t, bytes)
params2 := &ProofParams{
curve.PointG1.Generator(),
curve.PointG1.Generator(),
curve.PointG1.Generator(),
}
err = params2.UnmarshalBinary(bytes)
require.NoError(t, err)
require.True(t, params.x.Equal(params2.x))
require.True(t, params.y.Equal(params2.y))
require.True(t, params.z.Equal(params2.z))
}
func TestMembershipProof(t *testing.T) {
curve := curves.BLS12381(&curves.PointBls12381G1{})
sk, _ := new(SecretKey).New(curve, []byte("1234567890"))
pk, _ := sk.GetPublicKey(curve)
element1 := curve.Scalar.Hash([]byte("3"))
element2 := curve.Scalar.Hash([]byte("4"))
element3 := curve.Scalar.Hash([]byte("5"))
element4 := curve.Scalar.Hash([]byte("6"))
element5 := curve.Scalar.Hash([]byte("7"))
element6 := curve.Scalar.Hash([]byte("8"))
element7 := curve.Scalar.Hash([]byte("9"))
elements := []Element{element1, element2, element3, element4, element5, element6, element7}
// Initiate a new accumulator
acc, err := new(Accumulator).WithElements(curve, sk, elements)
require.NoError(t, err)
require.NotNil(t, acc.value)
// Initiate a new membership witness for value elements[3]
wit, err := new(MembershipWitness).New(elements[3], acc, sk)
require.NoError(t, err)
require.Equal(t, wit.y, elements[3])
// Create proof parameters, which contain randomly sampled G1 points X, Y, Z
params, err := new(ProofParams).New(curve, pk, []byte("entropy"))
require.NoError(t, err)
require.NotNil(t, params.x)
require.NotNil(t, params.y)
require.NotNil(t, params.z)
mpc, err := new(MembershipProofCommitting).New(wit, acc, params, pk)
require.NoError(t, err)
testMPC(t, mpc)
challenge := curve.Scalar.Hash(mpc.GetChallengeBytes())
require.NotNil(t, challenge)
proof := mpc.GenProof(challenge)
require.NotNil(t, proof)
testProof(t, proof)
finalProof, err := proof.Finalize(acc, params, pk, challenge)
require.NoError(t, err)
require.NotNil(t, finalProof)
testFinalProof(t, finalProof)
challenge2 := finalProof.GetChallenge(curve)
require.Equal(t, challenge, challenge2)
// Check we can still have a valid proof even if accumulator and witness are updated
data1 := curve.Scalar.Hash([]byte("1"))
data2 := curve.Scalar.Hash([]byte("2"))
data3 := curve.Scalar.Hash([]byte("3"))
data4 := curve.Scalar.Hash([]byte("4"))
data5 := curve.Scalar.Hash([]byte("5"))
data := []Element{data1, data2, data3, data4, data5}
additions := data[0:2]
deletions := data[2:5]
_, coefficients, err := acc.Update(sk, additions, deletions)
require.NoError(t, err)
require.NotNil(t, coefficients)
_, err = wit.BatchUpdate(additions, deletions, coefficients)
require.NoError(t, err)
newParams, err := new(ProofParams).New(curve, pk, []byte("entropy"))
require.NoError(t, err)
require.NotNil(t, newParams.x)
require.NotNil(t, newParams.y)
require.NotNil(t, newParams.z)
newMPC, err := new(MembershipProofCommitting).New(wit, acc, newParams, pk)
require.NoError(t, err)
testMPC(t, newMPC)
challenge3 := curve.Scalar.Hash(newMPC.GetChallengeBytes())
require.NotNil(t, challenge3)
newProof := newMPC.GenProof(challenge3)
require.NotNil(t, newProof)
testProof(t, newProof)
newFinalProof, err := newProof.Finalize(acc, newParams, pk, challenge3)
require.NoError(t, err)
require.NotNil(t, newFinalProof)
testFinalProof(t, newFinalProof)
challenge4 := newFinalProof.GetChallenge(curve)
require.Equal(t, challenge3, challenge4)
}
func testMPC(t *testing.T, mpc *MembershipProofCommitting) {
require.NotNil(t, mpc.eC)
require.NotNil(t, mpc.tSigma)
require.NotNil(t, mpc.tRho)
require.NotNil(t, mpc.deltaSigma)
require.NotNil(t, mpc.deltaRho)
require.NotNil(t, mpc.blindingFactor)
require.NotNil(t, mpc.rSigma)
require.NotNil(t, mpc.rRho)
require.NotNil(t, mpc.rDeltaSigma)
require.NotNil(t, mpc.rDeltaRho)
require.NotNil(t, mpc.sigma)
require.NotNil(t, mpc.rho)
require.NotNil(t, mpc.capRSigma)
require.NotNil(t, mpc.capRRho)
require.NotNil(t, mpc.capRDeltaSigma)
require.NotNil(t, mpc.capRDeltaRho)
require.NotNil(t, mpc.capRE)
require.NotNil(t, mpc.accumulator)
require.NotNil(t, mpc.witnessValue)
require.NotNil(t, mpc.xG1)
require.NotNil(t, mpc.yG1)
require.NotNil(t, mpc.zG1)
}
func testProof(t *testing.T, proof *MembershipProof) {
require.NotNil(t, proof.eC)
require.NotNil(t, proof.tSigma)
require.NotNil(t, proof.tRho)
require.NotNil(t, proof.sSigma)
require.NotNil(t, proof.sRho)
require.NotNil(t, proof.sDeltaSigma)
require.NotNil(t, proof.sDeltaRho)
require.NotNil(t, proof.sY)
}
func testFinalProof(t *testing.T, finalProof *MembershipProofFinal) {
require.NotNil(t, finalProof.accumulator)
require.NotNil(t, finalProof.eC)
require.NotNil(t, finalProof.tSigma)
require.NotNil(t, finalProof.tRho)
require.NotNil(t, finalProof.capRE)
require.NotNil(t, finalProof.capRSigma)
require.NotNil(t, finalProof.capRRho)
require.NotNil(t, finalProof.capRDeltaSigma)
require.NotNil(t, finalProof.capRDeltaRho)
}

crypto/accumulator/witness.go Executable file

@ -0,0 +1,375 @@
//
// Copyright Coinbase, Inc. All Rights Reserved.
//
// SPDX-License-Identifier: Apache-2.0
//
package accumulator
import (
"errors"
"fmt"
"git.sr.ht/~sircmpwn/go-bare"
"github.com/onsonr/sonr/crypto/core/curves"
)
// MembershipWitness contains the witness c and the value y with respect to the accumulator state.
type MembershipWitness struct {
c curves.Point
y curves.Scalar
}
// New creates a new membership witness
func (mw *MembershipWitness) New(y Element, acc *Accumulator, sk *SecretKey) (*MembershipWitness, error) {
if acc.value == nil || acc.value.IsIdentity() {
return nil, fmt.Errorf("value of accumulator should not be nil")
}
if sk.value == nil || sk.value.IsZero() {
return nil, fmt.Errorf("secret key should not be nil")
}
if y == nil || y.IsZero() {
return nil, fmt.Errorf("y should not be nil")
}
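// The witness is C = 1/(y + alpha) * V, computed by removing y from a copy of the accumulator.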
newAcc := &Accumulator{acc.value}
_, err := newAcc.Remove(sk, y)
if err != nil {
return nil, err
}
mw.c = newAcc.value
mw.y = y.Add(y.Zero())
return mw, nil
}
// Verify the MembershipWitness mw is a valid witness as per section 4 in
// <https://eprint.iacr.org/2020/777>
func (mw MembershipWitness) Verify(pk *PublicKey, acc *Accumulator) error {
if mw.c == nil || mw.y == nil || mw.c.IsIdentity() || mw.y.IsZero() {
return fmt.Errorf("c and y should not be nil")
}
if pk.value == nil || pk.value.IsIdentity() {
return fmt.Errorf("invalid public key")
}
if acc.value == nil || acc.value.IsIdentity() {
return fmt.Errorf("accumulator value should not be nil")
}
// tildeP, the G2 generator carried by the public key
g2, ok := pk.value.Generator().(curves.PairingPoint)
if !ok {
return errors.New("incorrect type conversion")
}
// y*tildeP + tildeQ, tildeP is a G2 generator.
p, ok := g2.Mul(mw.y).Add(pk.value).(curves.PairingPoint)
if !ok {
return errors.New("incorrect type conversion")
}
// Prepare
witness, ok := mw.c.(curves.PairingPoint)
if !ok {
return errors.New("incorrect type conversion")
}
v, ok := acc.value.Neg().(curves.PairingPoint)
if !ok {
return errors.New("incorrect type conversion")
}
// Check e(witness, y*tildeP + tildeQ) * e(-acc, tildeP) == Identity
result := p.MultiPairing(witness, p, v, g2)
if !result.IsOne() {
return fmt.Errorf("invalid result")
}
return nil
}
// ApplyDelta returns C' = dA(y)/dD(y)*C + 1/dD(y) * <Gamma_y, Omega>
// according to the witness update protocol described in section 4 of
// https://eprint.iacr.org/2020/777.pdf
func (mw *MembershipWitness) ApplyDelta(delta *Delta) (*MembershipWitness, error) {
if mw.c == nil || mw.y == nil || delta == nil {
return nil, fmt.Errorf("y, c or delta should not be nil")
}
// C' = dA(y)/dD(y)*C + 1/dD(y) * <Gamma_y, Omega>
mw.c = mw.c.Mul(delta.d).Add(delta.p)
return mw, nil
}
// BatchUpdate performs batch update as described in section 4
func (mw *MembershipWitness) BatchUpdate(additions []Element, deletions []Element, coefficients []Coefficient) (*MembershipWitness, error) {
delta, err := evaluateDelta(mw.y, additions, deletions, coefficients)
if err != nil {
return nil, err
}
mw, err = mw.ApplyDelta(delta)
if err != nil {
return nil, fmt.Errorf("applyDelta fails")
}
return mw, nil
}
// MultiBatchUpdate performs multi-batch update using epoch as described in section 4.2
func (mw *MembershipWitness) MultiBatchUpdate(A [][]Element, D [][]Element, C [][]Coefficient) (*MembershipWitness, error) {
delta, err := evaluateDeltas(mw.y, A, D, C)
if err != nil {
return nil, fmt.Errorf("evaluateDeltas fails")
}
mw, err = mw.ApplyDelta(delta)
if err != nil {
return nil, err
}
return mw, nil
}
// MarshalBinary converts a membership witness to bytes
func (mw MembershipWitness) MarshalBinary() ([]byte, error) {
if mw.c == nil || mw.y == nil {
return nil, fmt.Errorf("c and y value should not be nil")
}
result := append(mw.c.ToAffineCompressed(), mw.y.Bytes()...)
tv := &structMarshal{
Value: result,
Curve: mw.c.CurveName(),
}
return bare.Marshal(tv)
}
// UnmarshalBinary converts bytes into MembershipWitness
func (mw *MembershipWitness) UnmarshalBinary(data []byte) error {
if data == nil {
return fmt.Errorf("input data should not be nil")
}
tv := new(structMarshal)
err := bare.Unmarshal(data, tv)
if err != nil {
return err
}
curve := curves.GetCurveByName(tv.Curve)
if curve == nil {
return fmt.Errorf("invalid curve")
}
ptLength := len(curve.Point.ToAffineCompressed())
scLength := len(curve.Scalar.Bytes())
expectedLength := ptLength + scLength
if len(tv.Value) != expectedLength {
return fmt.Errorf("invalid byte sequence")
}
cValue, err := curve.Point.FromAffineCompressed(tv.Value[:ptLength])
if err != nil {
return err
}
yValue, err := curve.Scalar.SetBytes(tv.Value[ptLength:])
if err != nil {
return err
}
mw.c = cValue
mw.y = yValue
return nil
}
// Delta contains values d and p, where d should be the division dA(y)/dD(y) on some value y
// p should be equal to 1/dD * <Gamma_y, Omega>
type Delta struct {
d curves.Scalar
p curves.Point
}
// MarshalBinary converts Delta into bytes
func (d *Delta) MarshalBinary() ([]byte, error) {
if d.d == nil || d.p == nil {
return nil, fmt.Errorf("d and p should not be nil")
}
var result []byte
result = append(result, d.p.ToAffineCompressed()...)
result = append(result, d.d.Bytes()...)
tv := &structMarshal{
Value: result,
Curve: d.p.CurveName(),
}
return bare.Marshal(tv)
}
// UnmarshalBinary converts data into Delta
func (d *Delta) UnmarshalBinary(data []byte) error {
if data == nil {
return fmt.Errorf("expected non-zero byte sequence")
}
tv := new(structMarshal)
err := bare.Unmarshal(data, tv)
if err != nil {
return err
}
curve := curves.GetCurveByName(tv.Curve)
if curve == nil {
return fmt.Errorf("invalid curve")
}
ptLength := len(curve.Point.ToAffineCompressed())
scLength := len(curve.Scalar.Bytes())
expectedLength := ptLength + scLength
if len(tv.Value) != expectedLength {
return fmt.Errorf("invalid byte sequence")
}
pValue, err := curve.NewIdentityPoint().FromAffineCompressed(tv.Value[:ptLength])
if err != nil {
return err
}
dValue, err := curve.NewScalar().SetBytes(tv.Value[ptLength:])
if err != nil {
return err
}
d.d = dValue
d.p = pValue
return nil
}
// evaluateDeltas computes values used for membership witness batch update with epoch
// as described in section 4.2, page 11 of https://eprint.iacr.org/2020/777.pdf
func evaluateDeltas(y Element, A [][]Element, D [][]Element, C [][]Coefficient) (*Delta, error) {
if len(A) != len(D) || len(A) != len(C) {
return nil, fmt.Errorf("a, d, c should have same length")
}
one := y.One()
size := len(A)
// dA(x) = ∏ 1..n (yA_i - x)
aa := make([]curves.Scalar, 0)
// dD(x) = ∏ 1..m (yD_i - x)
dd := make([]curves.Scalar, 0)
a := one
d := one
// dA_{a->b}(y) = ∏ a..b dAs(y)
// dD_{a->b}(y) = ∏ a..b dDs(y)
for i := 0; i < size; i++ {
adds := A[i]
dels := D[i]
// ta = dAs(y)
ta, err := dad(adds, y)
if err != nil {
return nil, fmt.Errorf("dad on additions fails")
}
// td = dDs(y)
td, err := dad(dels, y)
if err != nil {
return nil, fmt.Errorf("dad on deletions fails")
}
// ∏ a..b dAs(y)
a = a.Mul(ta)
// ∏ a..b dDs(y)
d = d.Mul(td)
aa = append(aa, ta)
dd = append(dd, td)
}
// If this fails, then this value was removed.
d, err := d.Invert()
if err != nil {
return nil, fmt.Errorf("no inverse exists")
}
// <Gamma_y, Omega>
p := make(polynomialPoint, 0, size)
// Ωi->j+1 = ∑ 1..t (dAt * dDt-1) · Ω
for i := 0; i < size; i++ {
// t = i+1
// ∏^(t-1)_(h=i+1)
ddh := one
// dDi→t-1(y)
for h := 0; h < i; h++ {
ddh = ddh.Mul(dd[h])
}
// ∏^(j+1)_(k=t+1)
dak := one
// dAt->j(y)
for k := i + 1; k < size; k++ {
dak = dak.Mul(aa[k])
}
// dDi->t-1(y) * dAt->j(y)
dak = dak.Mul(ddh)
pp := make(polynomialPoint, len(C[i]))
for j := 0; j < len(pp); j++ {
pp[j] = C[i][j]
}
// dDi->t-1(y) * dAt->j(y) · Ω
pp, err := pp.Mul(dak)
if err != nil {
return nil, fmt.Errorf("pp.Mul fails")
}
p, err = p.Add(pp)
if err != nil {
return nil, fmt.Errorf("pp.Add fails")
}
}
// dAi->j(y)/dDi->j(y)
a = a.Mul(d)
// Ωi->j(y)
v, err := p.evaluate(y)
if err != nil {
return nil, fmt.Errorf("p.evaluate fails")
}
// (1/dDi->j(y)) * Ωi->j(y)
v = v.Mul(d)
// return
return &Delta{d: a, p: v}, nil
}
// evaluateDelta computes values used for membership witness batch update
// as described in section 4.1 of https://eprint.iacr.org/2020/777.pdf
func evaluateDelta(y Element, additions []Element, deletions []Element, coefficients []Coefficient) (*Delta, error) {
// dD(y) = ∏ 1..m (yD_i - y), d = 1/dD(y)
d, err := dad(deletions, y)
if err != nil {
return nil, fmt.Errorf("dad fails on deletions")
}
d, err = d.Invert()
if err != nil {
return nil, fmt.Errorf("no inverse exists")
}
// dA(y) = ∏ 1..n (yA_i - y)
a, err := dad(additions, y)
if err != nil {
return nil, fmt.Errorf("dad fails on additions")
}
// dA(y)/dD(y)
a = a.Mul(d)
// Create a PolynomialG1 from coefficients
p := make(polynomialPoint, len(coefficients))
for i := 0; i < len(coefficients); i++ {
p[i] = coefficients[i]
}
// <Gamma_y, Omega>
v, err := p.evaluate(y)
if err != nil {
return nil, fmt.Errorf("p.evaluate fails")
}
// 1/dD * <Gamma_y, Omega>
v = v.Mul(d)
return &Delta{d: a, p: v}, nil
}

crypto/accumulator/witness_test.go Executable file

@ -0,0 +1,229 @@
//
// Copyright Coinbase, Inc. All Rights Reserved.
//
// SPDX-License-Identifier: Apache-2.0
//
package accumulator
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/onsonr/sonr/crypto/core/curves"
)
func Test_Membership_Witness_New(t *testing.T) {
curve := curves.BLS12381(&curves.PointBls12381G1{})
var seed [32]byte
key, _ := new(SecretKey).New(curve, seed[:])
acc, _ := new(Accumulator).New(curve)
e := curve.Scalar.New(2)
mw, err := new(MembershipWitness).New(e, acc, key)
require.NoError(t, err)
require.NotNil(t, mw.c)
require.NotNil(t, mw.y)
}
func Test_Membership_Witness_Marshal(t *testing.T) {
curve := curves.BLS12381(&curves.PointBls12381G1{})
mw := &MembershipWitness{
curve.PointG1.Generator().Mul(curve.Scalar.New(10)),
curve.Scalar.New(15),
}
data, err := mw.MarshalBinary()
require.NoError(t, err)
require.NotNil(t, data)
newMW := &MembershipWitness{}
err = newMW.UnmarshalBinary(data)
require.NoError(t, err)
require.True(t, mw.c.Equal(newMW.c))
require.Equal(t, 0, mw.y.Cmp(newMW.y))
}
func Test_Membership(t *testing.T) {
curve := curves.BLS12381(&curves.PointBls12381G1{})
sk, _ := new(SecretKey).New(curve, []byte("1234567890"))
pk, _ := sk.GetPublicKey(curve)
element1 := curve.Scalar.Hash([]byte("3"))
element2 := curve.Scalar.Hash([]byte("4"))
element3 := curve.Scalar.Hash([]byte("5"))
element4 := curve.Scalar.Hash([]byte("6"))
element5 := curve.Scalar.Hash([]byte("7"))
element6 := curve.Scalar.Hash([]byte("8"))
element7 := curve.Scalar.Hash([]byte("9"))
elements := []Element{element1, element2, element3, element4, element5, element6, element7}
// nm_witness_max works as well if set to value larger than 0 for this test.
acc, err := new(Accumulator).WithElements(curve, sk, elements)
require.NoError(t, err)
require.NotNil(t, acc.value)
require.False(t, acc.value.IsIdentity())
require.True(t, acc.value.IsOnCurve())
require.NotEqual(t, acc.value, curve.NewG1GeneratorPoint())
wit, err := new(MembershipWitness).New(elements[3], acc, sk)
require.NoError(t, err)
require.Equal(t, wit.y, elements[3])
err = wit.Verify(pk, acc)
require.NoError(t, err)
// Test wrong cases, forge a wrong witness
wrongWit := MembershipWitness{
curve.PointG1.Identity(),
curve.Scalar.One(),
}
err = wrongWit.Verify(pk, acc)
require.Error(t, err)
// Test wrong cases, forge a wrong accumulator
wrongAcc := &Accumulator{
curve.PointG1.Generator(),
}
err = wit.Verify(pk, wrongAcc)
require.Error(t, err)
}
func Test_Membership_Batch_Update(t *testing.T) {
curve := curves.BLS12381(&curves.PointBls12381G1{})
sk, _ := new(SecretKey).New(curve, []byte("1234567890"))
pk, _ := sk.GetPublicKey(curve)
element1 := curve.Scalar.Hash([]byte("3"))
element2 := curve.Scalar.Hash([]byte("4"))
element3 := curve.Scalar.Hash([]byte("5"))
element4 := curve.Scalar.Hash([]byte("6"))
element5 := curve.Scalar.Hash([]byte("7"))
element6 := curve.Scalar.Hash([]byte("8"))
element7 := curve.Scalar.Hash([]byte("9"))
elements := []Element{element1, element2, element3, element4, element5, element6, element7}
// nm_witness_max works as well if set to value larger than 0 for this test.
acc, err := new(Accumulator).WithElements(curve, sk, elements)
require.NoError(t, err)
require.NotNil(t, acc.value)
wit, err := new(MembershipWitness).New(elements[3], acc, sk)
require.NoError(t, err)
require.Equal(t, wit.y, elements[3])
err = wit.Verify(pk, acc)
require.Nil(t, err)
data1 := curve.Scalar.Hash([]byte("1"))
data2 := curve.Scalar.Hash([]byte("2"))
data3 := curve.Scalar.Hash([]byte("3"))
data4 := curve.Scalar.Hash([]byte("4"))
data5 := curve.Scalar.Hash([]byte("5"))
data := []Element{data1, data2, data3, data4, data5}
additions := data[0:2]
deletions := data[2:5]
_, coefficients, err := acc.Update(sk, additions, deletions)
require.NoError(t, err)
require.NotNil(t, coefficients)
_, err = wit.BatchUpdate(additions, deletions, coefficients)
require.NoError(t, err)
err = wit.Verify(pk, acc)
require.Nil(t, err)
}
func Test_Membership_Multi_Batch_Update(t *testing.T) {
curve := curves.BLS12381(&curves.PointBls12381G1{})
sk, _ := new(SecretKey).New(curve, []byte("1234567890"))
pk, _ := sk.GetPublicKey(curve)
element1 := curve.Scalar.Hash([]byte("3"))
element2 := curve.Scalar.Hash([]byte("4"))
element3 := curve.Scalar.Hash([]byte("5"))
element4 := curve.Scalar.Hash([]byte("6"))
element5 := curve.Scalar.Hash([]byte("7"))
element6 := curve.Scalar.Hash([]byte("8"))
element7 := curve.Scalar.Hash([]byte("9"))
element8 := curve.Scalar.Hash([]byte("10"))
element9 := curve.Scalar.Hash([]byte("11"))
element10 := curve.Scalar.Hash([]byte("12"))
element11 := curve.Scalar.Hash([]byte("13"))
element12 := curve.Scalar.Hash([]byte("14"))
element13 := curve.Scalar.Hash([]byte("15"))
element14 := curve.Scalar.Hash([]byte("16"))
element15 := curve.Scalar.Hash([]byte("17"))
element16 := curve.Scalar.Hash([]byte("18"))
element17 := curve.Scalar.Hash([]byte("19"))
element18 := curve.Scalar.Hash([]byte("20"))
elements := []Element{
element1,
element2,
element3,
element4,
element5,
element6,
element7,
element8,
element9,
element10,
element11,
element12,
element13,
element14,
element15,
element16,
element17,
element18,
}
acc, err := new(Accumulator).WithElements(curve, sk, elements)
require.NoError(t, err)
require.NotNil(t, acc.value)
wit, err := new(MembershipWitness).New(elements[3], acc, sk)
require.NoError(t, err)
err = wit.Verify(pk, acc)
require.Nil(t, err)
data1 := curve.Scalar.Hash([]byte("1"))
data2 := curve.Scalar.Hash([]byte("2"))
data3 := curve.Scalar.Hash([]byte("3"))
data4 := curve.Scalar.Hash([]byte("4"))
data5 := curve.Scalar.Hash([]byte("5"))
data := []Element{data1, data2, data3, data4, data5}
adds1 := data[0:2]
dels1 := data[2:5]
_, coeffs1, err := acc.Update(sk, adds1, dels1)
require.NoError(t, err)
require.NotNil(t, coeffs1)
dels2 := elements[8:10]
_, coeffs2, err := acc.Update(sk, []Element{}, dels2)
require.NoError(t, err)
require.NotNil(t, coeffs2)
dels3 := elements[11:14]
_, coeffs3, err := acc.Update(sk, []Element{}, dels3)
require.NoError(t, err)
require.NotNil(t, coeffs3)
a := make([][]Element, 3)
a[0] = adds1
a[1] = []Element{}
a[2] = []Element{}
d := make([][]Element, 3)
d[0] = dels1
d[1] = dels2
d[2] = dels3
c := make([][]Coefficient, 3)
c[0] = coeffs1
c[1] = coeffs2
c[2] = coeffs3
_, err = wit.MultiBatchUpdate(a, d, c)
require.NoError(t, err)
err = wit.Verify(pk, acc)
require.Nil(t, err)
}

crypto/bulletproof/generators.go Executable file

@ -0,0 +1,57 @@
//
// Copyright Coinbase, Inc. All Rights Reserved.
//
// SPDX-License-Identifier: Apache-2.0
//
package bulletproof
import (
"github.com/pkg/errors"
"golang.org/x/crypto/sha3"
"github.com/onsonr/sonr/crypto/core/curves"
)
// generators contains a list of points to be used as generators for bulletproofs.
type generators []curves.Point
// ippGenerators holds generators necessary for an Inner Product Proof:
// a list of points divided in half into the G and H vectors
// See line 10 on pg 16 of https://eprint.iacr.org/2017/1066.pdf
type ippGenerators struct {
G generators
H generators
}
// getGeneratorPoints generates generators using HashToCurve with Shake256(domain) as input
// lenVector is the length of the scalars used for the Inner Product Proof
// getGeneratorPoints returns 2*lenVector total points, split evenly between the G and H
// lists of vectors per the IPP specification; the u generator is supplied separately by callers
// See line 10 on pg 16 of https://eprint.iacr.org/2017/1066.pdf
func getGeneratorPoints(lenVector int, domain []byte, curve curves.Curve) (*ippGenerators, error) {
shake := sha3.NewShake256()
_, err := shake.Write(domain)
if err != nil {
return nil, errors.Wrap(err, "getGeneratorPoints shake.Write")
}
numPoints := lenVector * 2
points := make([]curves.Point, numPoints)
for i := 0; i < numPoints; i++ {
bytes := [64]byte{}
_, err := shake.Read(bytes[:])
if err != nil {
return nil, errors.Wrap(err, "getGeneratorPoints shake.Read")
}
nextPoint := curve.Point.Hash(bytes[:])
points[i] = nextPoint
}
// Get G and H by splitting points in half
G, H, err := splitPointVector(points)
if err != nil {
return nil, errors.Wrap(err, "getGeneratorPoints splitPointVector")
}
out := ippGenerators{G: G, H: H}
return &out, nil
}

crypto/bulletproof/generators_test.go Executable file

@ -0,0 +1,61 @@
package bulletproof
import (
"testing"
"github.com/stretchr/testify/require"
"golang.org/x/crypto/sha3"
"github.com/onsonr/sonr/crypto/core/curves"
)
func TestGeneratorsHappyPath(t *testing.T) {
curve := curves.ED25519()
gs, err := getGeneratorPoints(10, []byte("test"), *curve)
require.NoError(t, err)
gsConcatenated := concatIPPGenerators(*gs)
require.Len(t, gs.G, 10)
require.Len(t, gs.H, 10)
require.True(t, noDuplicates(gsConcatenated))
}
func TestGeneratorsUniquePerDomain(t *testing.T) {
curve := curves.ED25519()
gs1, err := getGeneratorPoints(10, []byte("test"), *curve)
require.NoError(t, err)
gs1Concatenated := concatIPPGenerators(*gs1)
gs2, err := getGeneratorPoints(10, []byte("test2"), *curve)
require.NoError(t, err)
gs2Concatenated := concatIPPGenerators(*gs2)
require.True(t, areDisjoint(gs1Concatenated, gs2Concatenated))
}
func noDuplicates(gs generators) bool {
seen := map[[32]byte]bool{}
for _, G := range gs {
value := sha3.Sum256(G.ToAffineCompressed())
if seen[value] {
return false
}
seen[value] = true
}
return true
}
func areDisjoint(gs1, gs2 generators) bool {
for _, g1 := range gs1 {
for _, g2 := range gs2 {
if g1.Equal(g2) {
return false
}
}
}
return true
}
func concatIPPGenerators(ippGens ippGenerators) generators {
var out generators
out = append(out, ippGens.G...)
out = append(out, ippGens.H...)
return out
}

crypto/bulletproof/helpers.go Executable file

@ -0,0 +1,181 @@
//
// Copyright Coinbase, Inc. All Rights Reserved.
//
// SPDX-License-Identifier: Apache-2.0
//
package bulletproof
import (
"github.com/pkg/errors"
"github.com/onsonr/sonr/crypto/core/curves"
)
// innerProduct takes two lists of scalars (a, b) and performs the dot product returning a single scalar.
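// For example, innerProduct([1, 2], [3, 4]) = 1*3 + 2*4 = 11.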
func innerProduct(a, b []curves.Scalar) (curves.Scalar, error) {
if len(a) != len(b) {
return nil, errors.New("length of scalar vectors must be the same")
}
if len(a) < 1 {
return nil, errors.New("length of vectors must be at least one")
}
// Get a new scalar of value zero of the same curve as input arguments
innerProduct := a[0].Zero()
for i, aElem := range a {
bElem := b[i]
// innerProduct = aElem*bElem + innerProduct
innerProduct = aElem.MulAdd(bElem, innerProduct)
}
return innerProduct, nil
}
// splitPointVector takes a vector of points, splits it in half returning each half.
func splitPointVector(points []curves.Point) ([]curves.Point, []curves.Point, error) {
if len(points) < 1 {
return nil, nil, errors.New("length of points must be at least one")
}
if len(points)&0x01 != 0 {
return nil, nil, errors.New("length of points must be even")
}
nPrime := len(points) >> 1
firstHalf := points[:nPrime]
secondHalf := points[nPrime:]
return firstHalf, secondHalf, nil
}
// splitScalarVector takes a vector of scalars, splits it in half returning each half.
func splitScalarVector(scalars []curves.Scalar) ([]curves.Scalar, []curves.Scalar, error) {
if len(scalars) < 1 {
return nil, nil, errors.New("length of scalars must be at least one")
}
if len(scalars)&0x01 != 0 {
return nil, nil, errors.New("length of scalars must be even")
}
nPrime := len(scalars) >> 1
firstHalf := scalars[:nPrime]
secondHalf := scalars[nPrime:]
return firstHalf, secondHalf, nil
}
// multiplyScalarToPointVector takes a single scalar and a list of points, multiplies each point by scalar.
func multiplyScalarToPointVector(x curves.Scalar, g []curves.Point) []curves.Point {
products := make([]curves.Point, len(g))
for i, gElem := range g {
product := gElem.Mul(x)
products[i] = product
}
return products
}
// multiplyScalarToScalarVector takes a single scalar (x) and a list of scalars (a), multiplies each scalar in the vector by the scalar.
func multiplyScalarToScalarVector(x curves.Scalar, a []curves.Scalar) []curves.Scalar {
products := make([]curves.Scalar, len(a))
for i, aElem := range a {
product := aElem.Mul(x)
products[i] = product
}
return products
}
// multiplyPairwisePointVectors takes two lists of points (g, h) and performs a pairwise multiplication returning a list of points.
func multiplyPairwisePointVectors(g, h []curves.Point) ([]curves.Point, error) {
if len(g) != len(h) {
return nil, errors.New("length of point vectors must be the same")
}
product := make([]curves.Point, len(g))
for i, gElem := range g {
product[i] = gElem.Add(h[i])
}
return product, nil
}
// multiplyPairwiseScalarVectors takes two lists of scalars (a, b) and performs a pairwise multiplication returning a list of scalars.
func multiplyPairwiseScalarVectors(a, b []curves.Scalar) ([]curves.Scalar, error) {
if len(a) != len(b) {
return nil, errors.New("length of scalar vectors must be the same")
}
}
product := make([]curves.Scalar, len(a))
for i, aElem := range a {
product[i] = aElem.Mul(b[i])
}
return product, nil
}
// addPairwiseScalarVectors takes two lists of scalars (a, b) and performs a pairwise addition returning a list of scalars.
func addPairwiseScalarVectors(a, b []curves.Scalar) ([]curves.Scalar, error) {
if len(a) != len(b) {
return nil, errors.New("length of scalar vectors must be the same")
}
sum := make([]curves.Scalar, len(a))
for i, aElem := range a {
sum[i] = aElem.Add(b[i])
}
return sum, nil
}
// subtractPairwiseScalarVectors takes two lists of scalars (a, b) and performs a pairwise subtraction returning a list of scalars.
func subtractPairwiseScalarVectors(a, b []curves.Scalar) ([]curves.Scalar, error) {
if len(a) != len(b) {
return nil, errors.New("length of scalar vectors must be the same")
}
diff := make([]curves.Scalar, len(a))
for i, aElem := range a {
diff[i] = aElem.Sub(b[i])
}
return diff, nil
}
// invertScalars takes a list of scalars then returns a list with each element inverted.
func invertScalars(xs []curves.Scalar) ([]curves.Scalar, error) {
xinvs := make([]curves.Scalar, len(xs))
for i, x := range xs {
xinv, err := x.Invert()
if err != nil {
return nil, errors.Wrap(err, "bulletproof helpers invertx")
}
xinvs[i] = xinv
}
return xinvs, nil
}
// isPowerOfTwo returns whether a number i is a positive power of two.
func isPowerOfTwo(i int) bool {
return i > 0 && i&(i-1) == 0
}
// get2nVector returns a scalar vector 2^n such that [1, 2, 4, ... 2^(n-1)]
// See k^n and 2^n definitions on pg 12 of https://eprint.iacr.org/2017/1066.pdf
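// For example, length 4 yields [1, 2, 4, 8].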
func get2nVector(length int, curve curves.Curve) []curves.Scalar {
vector2n := make([]curves.Scalar, length)
vector2n[0] = curve.Scalar.One()
for i := 1; i < length; i++ {
vector2n[i] = vector2n[i-1].Double()
}
return vector2n
}
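// get1nVector returns the all-ones scalar vector 1^n of the given length.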
func get1nVector(length int, curve curves.Curve) []curves.Scalar {
vector1n := make([]curves.Scalar, length)
for i := 0; i < length; i++ {
vector1n[i] = curve.Scalar.One()
}
return vector1n
}
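// getknVector returns the scalar vector [1, k, k^2, ..., k^(n-1)]; it writes vectorkn[1] directly, so callers must pass length >= 2.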
func getknVector(k curves.Scalar, length int, curve curves.Curve) []curves.Scalar {
vectorkn := make([]curves.Scalar, length)
vectorkn[0] = curve.Scalar.One()
vectorkn[1] = k
for i := 2; i < length; i++ {
vectorkn[i] = vectorkn[i-1].Mul(k)
}
return vectorkn
}

crypto/bulletproof/helpers_test.go Executable file

@ -0,0 +1,85 @@
package bulletproof
import (
crand "crypto/rand"
"testing"
"github.com/stretchr/testify/require"
"github.com/onsonr/sonr/crypto/core/curves"
)
func TestInnerProductHappyPath(t *testing.T) {
curve := curves.ED25519()
a := randScalarVec(3, *curve)
b := randScalarVec(3, *curve)
_, err := innerProduct(a, b)
require.NoError(t, err)
}
func TestInnerProductMismatchedLengths(t *testing.T) {
curve := curves.ED25519()
a := randScalarVec(3, *curve)
b := randScalarVec(4, *curve)
_, err := innerProduct(a, b)
require.Error(t, err)
}
func TestInnerProductEmptyVector(t *testing.T) {
curve := curves.ED25519()
a := randScalarVec(0, *curve)
b := randScalarVec(0, *curve)
_, err := innerProduct(a, b)
require.Error(t, err)
}
func TestInnerProductOut(t *testing.T) {
curve := curves.ED25519()
a := randScalarVec(2, *curve)
b := randScalarVec(2, *curve)
c, err := innerProduct(a, b)
require.NoError(t, err)
// Calculate manually a0*b0 + a1*b1
cPrime := a[0].Mul(b[0]).Add(a[1].Mul(b[1]))
require.Equal(t, c, cPrime)
}
func TestSplitListofPointsHappyPath(t *testing.T) {
curve := curves.ED25519()
points := randPointVec(10, *curve)
firstHalf, secondHalf, err := splitPointVector(points)
require.NoError(t, err)
require.Len(t, firstHalf, 5)
require.Len(t, secondHalf, 5)
}
func TestSplitListofPointsOddLength(t *testing.T) {
curve := curves.ED25519()
points := randPointVec(11, *curve)
_, _, err := splitPointVector(points)
require.Error(t, err)
}
func TestSplitListofPointsZeroLength(t *testing.T) {
curve := curves.ED25519()
points := randPointVec(0, *curve)
_, _, err := splitPointVector(points)
require.Error(t, err)
}
func randScalarVec(length int, curve curves.Curve) []curves.Scalar {
out := make([]curves.Scalar, length)
for i := 0; i < length; i++ {
out[i] = curve.Scalar.Random(crand.Reader)
}
return out
}
func randPointVec(length int, curve curves.Curve) []curves.Point {
out := make([]curves.Point, length)
for i := 0; i < length; i++ {
out[i] = curve.Point.Random(crand.Reader)
}
return out
}

crypto/bulletproof/ipp_prover.go Executable file

@ -0,0 +1,396 @@
//
// Copyright Coinbase, Inc. All Rights Reserved.
//
// SPDX-License-Identifier: Apache-2.0
//
// Package bulletproof implements the zero knowledge protocol bulletproofs as defined in https://eprint.iacr.org/2017/1066.pdf
package bulletproof
import (
"github.com/gtank/merlin"
"github.com/pkg/errors"
"github.com/onsonr/sonr/crypto/core/curves"
)
// InnerProductProver is the struct used to create InnerProductProofs
// It specifies which curve to use and holds precomputed generators
// See NewInnerProductProver() for prover initialization.
type InnerProductProver struct {
curve curves.Curve
generators ippGenerators
}
// InnerProductProof contains necessary output for the inner product proof
// a and b are the final input vectors of scalars, they should be of length 1
// Ls and Rs are calculated per recursion of the IPP and are necessary for verification
// See section 3.1 on pg 15 of https://eprint.iacr.org/2017/1066.pdf
type InnerProductProof struct {
a, b curves.Scalar
capLs, capRs []curves.Point
curve *curves.Curve
}
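// For input vectors of length n (a power of two), capLs and capRs each hold log2(n) points, one pair per halving round.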
// ippRecursion is the same as IPP but tracks recursive a', b', g', h' and Ls and Rs
// It should only be used internally by InnerProductProver.Prove()
// See L35 on pg 16 of https://eprint.iacr.org/2017/1066.pdf
type ippRecursion struct {
a, b []curves.Scalar
c curves.Scalar
capLs, capRs []curves.Point
g, h []curves.Point
u, capP curves.Point
transcript *merlin.Transcript
}
// NewInnerProductProver initializes a new prover
// It uses the specified domain to generate generators for vectors of at most maxVectorLength
// A prover can be used to construct inner product proofs for vectors of length less than or equal to maxVectorLength
// A prover is defined by an explicit curve.
func NewInnerProductProver(maxVectorLength int, domain []byte, curve curves.Curve) (*InnerProductProver, error) {
generators, err := getGeneratorPoints(maxVectorLength, domain, curve)
if err != nil {
return nil, errors.Wrap(err, "ipp getGenerators")
}
return &InnerProductProver{curve: curve, generators: *generators}, nil
}
// NewInnerProductProof initializes a new InnerProductProof for a specified curve
// This should be used in tandem with UnmarshalBinary() to convert a marshaled proof into the struct.
func NewInnerProductProof(curve *curves.Curve) *InnerProductProof {
var capLs, capRs []curves.Point
newProof := InnerProductProof{
a: curve.NewScalar(),
b: curve.NewScalar(),
capLs: capLs,
capRs: capRs,
curve: curve,
}
return &newProof
}
// rangeToIPP takes the output of a range proof and converts it into an inner product proof
// See section 4.2 on pg 20
// The conversion specifies generators to use (g and hPrime), as well as the two vectors l, r of which the inner product is tHat
// Additionally, note that the P used for the IPP is in fact P*h^-mu from the range proof.
func (prover *InnerProductProver) rangeToIPP(proofG, proofH []curves.Point, l, r []curves.Scalar, tHat curves.Scalar, capPhmuinv, u curves.Point, transcript *merlin.Transcript) (*InnerProductProof, error) {
// Note that P as a witness is only g^l * h^r
// P needs to be in the form of g^l * h^r * u^<l,r>
// Calculate the final P including the u^<l,r> term
utHat := u.Mul(tHat)
capP := capPhmuinv.Add(utHat)
// Use params to prove inner product
recursionParams := &ippRecursion{
a: l,
b: r,
capLs: []curves.Point{},
capRs: []curves.Point{},
c: tHat,
g: proofG,
h: proofH,
capP: capP,
u: u,
transcript: transcript,
}
return prover.proveRecursive(recursionParams)
}
// getP returns the initial P value given two scalars a,b and point u
// This method should only be used for testing
// See (3) on page 13 of https://eprint.iacr.org/2017/1066.pdf
func (prover *InnerProductProver) getP(a, b []curves.Scalar, u curves.Point) (curves.Point, error) {
// Vectors must have length power of two
if !isPowerOfTwo(len(a)) {
return nil, errors.New("ipp vector length must be power of two")
}
// Generator vectors must be same length
if len(prover.generators.G) != len(prover.generators.H) {
return nil, errors.New("ipp generator lengths of g and h must be equal")
}
// Inner product requires len(a) == len(b) else error is returned
c, err := innerProduct(a, b)
if err != nil {
return nil, errors.Wrap(err, "ipp getInnerProduct")
}
// In case where len(a) is less than number of generators precomputed by prover, trim to length
proofG := prover.generators.G[0:len(a)]
proofH := prover.generators.H[0:len(b)]
// initial P = g^a * h^b * u^(a dot b) (See (3) on page 13 of https://eprint.iacr.org/2017/1066.pdf)
ga := prover.curve.NewGeneratorPoint().SumOfProducts(proofG, a)
hb := prover.curve.NewGeneratorPoint().SumOfProducts(proofH, b)
uadotb := u.Mul(c)
capP := ga.Add(hb).Add(uadotb)
return capP, nil
}
// Prove executes the prover protocol on pg 16 of https://eprint.iacr.org/2017/1066.pdf
// It generates an inner product proof for vectors a and b, using u to blind the inner product in P
// A transcript is used for the Fiat Shamir heuristic.
func (prover *InnerProductProver) Prove(a, b []curves.Scalar, u curves.Point, transcript *merlin.Transcript) (*InnerProductProof, error) {
// Vectors must have length power of two
if !isPowerOfTwo(len(a)) {
return nil, errors.New("ipp vector length must be power of two")
}
// Generator vectors must be same length
if len(prover.generators.G) != len(prover.generators.H) {
return nil, errors.New("ipp generator lengths of g and h must be equal")
}
// Inner product requires len(a) == len(b) else error is returned
c, err := innerProduct(a, b)
if err != nil {
return nil, errors.Wrap(err, "ipp getInnerProduct")
}
// Length of vectors must not exceed the number of generators precomputed by the prover
if len(a) > len(prover.generators.G) {
return nil, errors.New("ipp vector length must be less than maxVectorLength")
}
// In case where len(a) is less than number of generators precomputed by prover, trim to length
proofG := prover.generators.G[0:len(a)]
proofH := prover.generators.H[0:len(b)]
// initial P = g^a * h^b * u^(a dot b) (See (3) on page 13 of https://eprint.iacr.org/2017/1066.pdf)
ga := prover.curve.NewGeneratorPoint().SumOfProducts(proofG, a)
hb := prover.curve.NewGeneratorPoint().SumOfProducts(proofH, b)
uadotb := u.Mul(c)
capP := ga.Add(hb).Add(uadotb)
recursionParams := &ippRecursion{
a: a,
b: b,
capLs: []curves.Point{},
capRs: []curves.Point{},
c: c,
g: proofG,
h: proofH,
capP: capP,
u: u,
transcript: transcript,
}
return prover.proveRecursive(recursionParams)
}
// proveRecursive executes the recursion on pg 16 of https://eprint.iacr.org/2017/1066.pdf
func (prover *InnerProductProver) proveRecursive(recursionParams *ippRecursion) (*InnerProductProof, error) {
// length checks
if len(recursionParams.a) != len(recursionParams.b) {
return nil, errors.New("ipp proveRecursive a and b different lengths")
}
if len(recursionParams.g) != len(recursionParams.h) {
return nil, errors.New("ipp proveRecursive g and h different lengths")
}
if len(recursionParams.a) != len(recursionParams.g) {
return nil, errors.New("ipp proveRecursive scalar and point vectors different lengths")
}
// Base case (L14, pg16 of https://eprint.iacr.org/2017/1066.pdf)
if len(recursionParams.a) == 1 {
proof := &InnerProductProof{
a: recursionParams.a[0],
b: recursionParams.b[0],
capLs: recursionParams.capLs,
capRs: recursionParams.capRs,
curve: &prover.curve,
}
return proof, nil
}
// Split current state into low (first half) vs high (second half) vectors
aLo, aHi, err := splitScalarVector(recursionParams.a)
if err != nil {
return nil, errors.Wrap(err, "recursionParams splitScalarVector")
}
bLo, bHi, err := splitScalarVector(recursionParams.b)
if err != nil {
return nil, errors.Wrap(err, "recursionParams splitScalarVector")
}
gLo, gHi, err := splitPointVector(recursionParams.g)
if err != nil {
return nil, errors.Wrap(err, "recursionParams splitPointVector")
}
hLo, hHi, err := splitPointVector(recursionParams.h)
if err != nil {
return nil, errors.Wrap(err, "recursionParams splitPointVector")
}
// c_l, c_r (L21,22, pg16 of https://eprint.iacr.org/2017/1066.pdf)
cL, err := innerProduct(aLo, bHi)
if err != nil {
return nil, errors.Wrap(err, "recursionParams innerProduct")
}
cR, err := innerProduct(aHi, bLo)
if err != nil {
return nil, errors.Wrap(err, "recursionParams innerProduct")
}
// L, R (L23,24, pg16 of https://eprint.iacr.org/2017/1066.pdf)
lga := prover.curve.Point.SumOfProducts(gHi, aLo)
lhb := prover.curve.Point.SumOfProducts(hLo, bHi)
ucL := recursionParams.u.Mul(cL)
capL := lga.Add(lhb).Add(ucL)
rga := prover.curve.Point.SumOfProducts(gLo, aHi)
rhb := prover.curve.Point.SumOfProducts(hHi, bLo)
ucR := recursionParams.u.Mul(cR)
capR := rga.Add(rhb).Add(ucR)
// Add L,R for verifier to use to calculate final g, h
newL := recursionParams.capLs
newL = append(newL, capL)
newR := recursionParams.capRs
newR = append(newR, capR)
// Get x from L, R for non-interactive (See section 4.4 pg22 of https://eprint.iacr.org/2017/1066.pdf)
// Note this replaces the interactive model, i.e. L36-28 of pg16 of https://eprint.iacr.org/2017/1066.pdf
x, err := prover.calcx(capL, capR, recursionParams.transcript)
if err != nil {
return nil, errors.Wrap(err, "recursionParams calcx")
}
// Calculate recursive inputs
xInv, err := x.Invert()
if err != nil {
return nil, errors.Wrap(err, "recursionParams x.Invert")
}
// g', h' (L29,30, pg16 of https://eprint.iacr.org/2017/1066.pdf)
gLoxInverse := multiplyScalarToPointVector(xInv, gLo)
gHix := multiplyScalarToPointVector(x, gHi)
gPrime, err := multiplyPairwisePointVectors(gLoxInverse, gHix)
if err != nil {
return nil, errors.Wrap(err, "recursionParams multiplyPairwisePointVectors")
}
hLox := multiplyScalarToPointVector(x, hLo)
hHixInv := multiplyScalarToPointVector(xInv, hHi)
hPrime, err := multiplyPairwisePointVectors(hLox, hHixInv)
if err != nil {
return nil, errors.Wrap(err, "recursionParams multiplyPairwisePointVectors")
}
// P' (L31, pg16 of https://eprint.iacr.org/2017/1066.pdf)
xSquare := x.Square()
xInvSquare := xInv.Square()
LxSquare := capL.Mul(xSquare)
RxInvSquare := capR.Mul(xInvSquare)
PPrime := LxSquare.Add(recursionParams.capP).Add(RxInvSquare)
// a', b' (L33, 34, pg16 of https://eprint.iacr.org/2017/1066.pdf)
aLox := multiplyScalarToScalarVector(x, aLo)
aHixIn := multiplyScalarToScalarVector(xInv, aHi)
aPrime, err := addPairwiseScalarVectors(aLox, aHixIn)
if err != nil {
return nil, errors.Wrap(err, "recursionParams addPairwiseScalarVectors")
}
bLoxInv := multiplyScalarToScalarVector(xInv, bLo)
bHix := multiplyScalarToScalarVector(x, bHi)
bPrime, err := addPairwiseScalarVectors(bLoxInv, bHix)
if err != nil {
return nil, errors.Wrap(err, "recursionParams addPairwiseScalarVectors")
}
// c'
cPrime, err := innerProduct(aPrime, bPrime)
if err != nil {
return nil, errors.Wrap(err, "recursionParams innerProduct")
}
// Make recursive call (L35, pg16 of https://eprint.iacr.org/2017/1066.pdf)
recursiveIPP := &ippRecursion{
a: aPrime,
b: bPrime,
capLs: newL,
capRs: newR,
c: cPrime,
g: gPrime,
h: hPrime,
capP: PPrime,
u: recursionParams.u,
transcript: recursionParams.transcript,
}
out, err := prover.proveRecursive(recursiveIPP)
if err != nil {
return nil, errors.Wrap(err, "recursionParams proveRecursive")
}
return out, nil
}
// calcx uses a merlin transcript for Fiat Shamir
// For each recursion, it takes the current state of the transcript and appends the newly calculated L and R values
// A new scalar is then read from the transcript
// See section 4.4 pg22 of https://eprint.iacr.org/2017/1066.pdf
func (prover *InnerProductProver) calcx(capL, capR curves.Point, transcript *merlin.Transcript) (curves.Scalar, error) {
// Add the newest capL and capR values to transcript
transcript.AppendMessage([]byte("addRecursiveL"), capL.ToAffineUncompressed())
transcript.AppendMessage([]byte("addRecursiveR"), capR.ToAffineUncompressed())
// Read 64 bytes from the transcript and set them to a scalar
outBytes := transcript.ExtractBytes([]byte("getx"), 64)
x, err := prover.curve.NewScalar().SetBytesWide(outBytes)
if err != nil {
return nil, errors.Wrap(err, "calcx NewScalar SetBytesWide")
}
return x, nil
}
// MarshalBinary takes an inner product proof and marshals into bytes.
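// The layout is a || b || capL_0 || capR_0 || ... || capL_(k-1) || capR_(k-1),
// with scalars in fixed-width form and points affine-compressed.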
func (proof *InnerProductProof) MarshalBinary() []byte {
var out []byte
out = append(out, proof.a.Bytes()...)
out = append(out, proof.b.Bytes()...)
for i, capLElem := range proof.capLs {
capRElem := proof.capRs[i]
out = append(out, capLElem.ToAffineCompressed()...)
out = append(out, capRElem.ToAffineCompressed()...)
}
return out
}
// UnmarshalBinary takes bytes of a marshaled proof and writes them into an inner product proof
// The inner product proof used should be from the output of NewInnerProductProof().
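// Note: the input is assumed to be well-formed as produced by MarshalBinary; truncated data is not validated.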
func (proof *InnerProductProof) UnmarshalBinary(data []byte) error {
scalarLen := len(proof.curve.NewScalar().Bytes())
pointLen := len(proof.curve.NewGeneratorPoint().ToAffineCompressed())
ptr := 0
// Get scalars
a, err := proof.curve.NewScalar().SetBytes(data[ptr : ptr+scalarLen])
if err != nil {
return errors.New("innerProductProof UnmarshalBinary SetBytes")
}
proof.a = a
ptr += scalarLen
b, err := proof.curve.NewScalar().SetBytes(data[ptr : ptr+scalarLen])
if err != nil {
return errors.New("innerProductProof UnmarshalBinary SetBytes")
}
proof.b = b
ptr += scalarLen
// Get points
var capLs, capRs []curves.Point //nolint:prealloc // pointer arithmetic makes it too unreadable.
for ptr < len(data) {
capLElem, err := proof.curve.Point.FromAffineCompressed(data[ptr : ptr+pointLen])
if err != nil {
return errors.New("innerProductProof UnmarshalBinary FromAffineCompressed")
}
capLs = append(capLs, capLElem)
ptr += pointLen
capRElem, err := proof.curve.Point.FromAffineCompressed(data[ptr : ptr+pointLen])
if err != nil {
return errors.New("innerProductProof UnmarshalBinary FromAffineCompressed")
}
capRs = append(capRs, capRElem)
ptr += pointLen
}
proof.capLs = capLs
proof.capRs = capRs
return nil
}

View File

@ -0,0 +1,99 @@
package bulletproof
import (
crand "crypto/rand"
"testing"
"github.com/gtank/merlin"
"github.com/stretchr/testify/require"
"github.com/onsonr/sonr/crypto/core/curves"
)
func TestIPPHappyPath(t *testing.T) {
curve := curves.ED25519()
prover, err := NewInnerProductProver(8, []byte("test"), *curve)
require.NoError(t, err)
a := randScalarVec(8, *curve)
b := randScalarVec(8, *curve)
u := curve.Point.Random(crand.Reader)
transcript := merlin.NewTranscript("test")
proof, err := prover.Prove(a, b, u, transcript)
require.NoError(t, err)
require.Equal(t, 3, len(proof.capLs))
require.Equal(t, 3, len(proof.capRs))
}
func TestIPPMismatchedVectors(t *testing.T) {
curve := curves.ED25519()
prover, err := NewInnerProductProver(8, []byte("test"), *curve)
require.NoError(t, err)
a := randScalarVec(4, *curve)
b := randScalarVec(8, *curve)
u := curve.Point.Random(crand.Reader)
transcript := merlin.NewTranscript("test")
_, err = prover.Prove(a, b, u, transcript)
require.Error(t, err)
}
func TestIPPNonPowerOfTwoLengthVectors(t *testing.T) {
curve := curves.ED25519()
prover, err := NewInnerProductProver(8, []byte("test"), *curve)
require.NoError(t, err)
a := randScalarVec(3, *curve)
b := randScalarVec(3, *curve)
u := curve.Point.Random(crand.Reader)
transcript := merlin.NewTranscript("test")
_, err = prover.Prove(a, b, u, transcript)
require.Error(t, err)
}
func TestIPPZeroLengthVectors(t *testing.T) {
curve := curves.ED25519()
prover, err := NewInnerProductProver(8, []byte("test"), *curve)
require.NoError(t, err)
a := randScalarVec(0, *curve)
b := randScalarVec(0, *curve)
u := curve.Point.Random(crand.Reader)
transcript := merlin.NewTranscript("test")
_, err = prover.Prove(a, b, u, transcript)
require.Error(t, err)
}
func TestIPPGreaterThanMaxLengthVectors(t *testing.T) {
curve := curves.ED25519()
prover, err := NewInnerProductProver(8, []byte("test"), *curve)
require.NoError(t, err)
a := randScalarVec(16, *curve)
b := randScalarVec(16, *curve)
u := curve.Point.Random(crand.Reader)
transcript := merlin.NewTranscript("test")
_, err = prover.Prove(a, b, u, transcript)
require.Error(t, err)
}
func TestIPPMarshal(t *testing.T) {
curve := curves.ED25519()
prover, err := NewInnerProductProver(8, []byte("test"), *curve)
require.NoError(t, err)
a := randScalarVec(8, *curve)
b := randScalarVec(8, *curve)
u := curve.Point.Random(crand.Reader)
transcript := merlin.NewTranscript("test")
proof, err := prover.Prove(a, b, u, transcript)
require.NoError(t, err)
proofMarshaled := proof.MarshalBinary()
proofPrime := NewInnerProductProof(curve)
err = proofPrime.UnmarshalBinary(proofMarshaled)
require.NoError(t, err)
require.Zero(t, proof.a.Cmp(proofPrime.a))
require.Zero(t, proof.b.Cmp(proofPrime.b))
for i, proofCapLElem := range proof.capLs {
proofPrimeCapLElem := proofPrime.capLs[i]
require.True(t, proofCapLElem.Equal(proofPrimeCapLElem))
proofCapRElem := proof.capRs[i]
proofPrimeCapRElem := proofPrime.capRs[i]
require.True(t, proofCapRElem.Equal(proofPrimeCapRElem))
}
}

View File

@ -0,0 +1,209 @@
package bulletproof
import (
"github.com/gtank/merlin"
"github.com/pkg/errors"
"github.com/onsonr/sonr/crypto/core/curves"
)
// InnerProductVerifier is the struct used to verify inner product proofs
// It specifies which curve to use and holds precomputed generators
// See NewInnerProductProver() for prover initialization.
type InnerProductVerifier struct {
curve curves.Curve
generators ippGenerators
}
// NewInnerProductVerifier initializes a new verifier
// It uses the specified domain to generate generators for vectors of at most maxVectorLength
// A verifier can be used to verify inner product proofs for vectors of length less than or equal to maxVectorLength
// A verifier is defined by an explicit curve.
func NewInnerProductVerifier(maxVectorLength int, domain []byte, curve curves.Curve) (*InnerProductVerifier, error) {
generators, err := getGeneratorPoints(maxVectorLength, domain, curve)
if err != nil {
return nil, errors.Wrap(err, "ipp getGenerators")
}
return &InnerProductVerifier{curve: curve, generators: *generators}, nil
}
// Verify verifies the given proof inputs
// It implements the final comparison of section 3.1 on pg17 of https://eprint.iacr.org/2017/1066.pdf
func (verifier *InnerProductVerifier) Verify(capP, u curves.Point, proof *InnerProductProof, transcript *merlin.Transcript) (bool, error) {
if len(proof.capLs) != len(proof.capRs) {
return false, errors.New("ipp capLs and capRs must be same length")
}
// Generator vectors must be same length
if len(verifier.generators.G) != len(verifier.generators.H) {
return false, errors.New("ipp generator lengths of g and h must be equal")
}
// Recover the generators for each element of a and b
// len(capLs) = log2(n), so n is recovered by exponentiation
n := 1 << len(proof.capLs)
// Vector length must not exceed the number of precomputed generators
if n > len(verifier.generators.G) {
return false, errors.New("ipp vector length must not exceed maxVectorLength")
}
// In case n is less than the number of generators precomputed by the verifier, trim to length
proofG := verifier.generators.G[0:n]
proofH := verifier.generators.H[0:n]
xs, err := getxs(transcript, proof.capLs, proof.capRs, verifier.curve)
if err != nil {
return false, errors.Wrap(err, "verifier getxs")
}
s, err := verifier.getsNew(xs, n)
if err != nil {
return false, errors.Wrap(err, "verifier getsNew")
}
lhs, err := verifier.getLHS(u, proof, proofG, proofH, s)
if err != nil {
return false, errors.Wrap(err, "verify getLHS")
}
rhs, err := verifier.getRHS(capP, proof, xs)
if err != nil {
return false, errors.Wrap(err, "verify getRHS")
}
return lhs.Equal(rhs), nil
}
// VerifyFromRangeProof verifies an inner product proof that was generated within a range proof
// It implements the final comparison of section 3.1 on pg17 of https://eprint.iacr.org/2017/1066.pdf
func (verifier *InnerProductVerifier) VerifyFromRangeProof(proofG, proofH []curves.Point, capPhmuinv, u curves.Point, tHat curves.Scalar, proof *InnerProductProof, transcript *merlin.Transcript) (bool, error) {
// len(capLs) = log2(n), so n is recovered by exponentiation
n := 1 << len(proof.capLs)
xs, err := getxs(transcript, proof.capLs, proof.capRs, verifier.curve)
if err != nil {
return false, errors.Wrap(err, "verifier getxs")
}
s, err := verifier.gets(xs, n)
if err != nil {
return false, errors.Wrap(err, "verifier gets")
}
lhs, err := verifier.getLHS(u, proof, proofG, proofH, s)
if err != nil {
return false, errors.Wrap(err, "verify getLHS")
}
utHat := u.Mul(tHat)
capP := capPhmuinv.Add(utHat)
rhs, err := verifier.getRHS(capP, proof, xs)
if err != nil {
return false, errors.Wrap(err, "verify getRHS")
}
return lhs.Equal(rhs), nil
}
// getRHS gets the right hand side of the final comparison of section 3.1 on pg17.
func (*InnerProductVerifier) getRHS(capP curves.Point, proof *InnerProductProof, xs []curves.Scalar) (curves.Point, error) {
product := capP
for j, Lj := range proof.capLs {
Rj := proof.capRs[j]
xj := xs[j]
xjSquare := xj.Square()
xjSquareInv, err := xjSquare.Invert()
if err != nil {
return nil, errors.Wrap(err, "verify invert")
}
LjxjSquare := Lj.Mul(xjSquare)
RjxjSquareInv := Rj.Mul(xjSquareInv)
product = product.Add(LjxjSquare).Add(RjxjSquareInv)
}
return product, nil
}
// getLHS gets the left hand side of the final comparison of section 3.1 on pg17.
func (verifier *InnerProductVerifier) getLHS(u curves.Point, proof *InnerProductProof, g, h []curves.Point, s []curves.Scalar) (curves.Point, error) {
sInv, err := invertScalars(s)
if err != nil {
return nil, errors.Wrap(err, "verify invertScalars")
}
// g^(a*s)
as := multiplyScalarToScalarVector(proof.a, s)
gas := verifier.curve.Point.SumOfProducts(g, as)
// h^(b*s^-1)
bsInv := multiplyScalarToScalarVector(proof.b, sInv)
hbsInv := verifier.curve.Point.SumOfProducts(h, bsInv)
// u^(a*b)
ab := proof.a.Mul(proof.b)
uab := u.Mul(ab)
// g^(a*s) * h^(b*s^-1) * u^(a*b)
out := gas.Add(hbsInv).Add(uab)
return out, nil
}
// getxs calculates the x values from Ls and Rs
// For each index, the L and R values are first appended to the transcript, then a new x is read from it
// This mirrors the writing of Ls and Rs and the reading of xs in the prover.
func getxs(transcript *merlin.Transcript, capLs, capRs []curves.Point, curve curves.Curve) ([]curves.Scalar, error) {
xs := make([]curves.Scalar, len(capLs))
for i, capLi := range capLs {
capRi := capRs[i]
// Add the newest L and R values to transcript
transcript.AppendMessage([]byte("addRecursiveL"), capLi.ToAffineUncompressed())
transcript.AppendMessage([]byte("addRecursiveR"), capRi.ToAffineUncompressed())
// Read 64 bytes from the transcript and set them to a scalar
outBytes := transcript.ExtractBytes([]byte("getx"), 64)
x, err := curve.NewScalar().SetBytesWide(outBytes)
if err != nil {
return nil, errors.Wrap(err, "calcx NewScalar SetBytesWide")
}
xs[i] = x
}
return xs, nil
}
// gets calculates the vector s of values used for verification
// See the second expression of section 3.1 on pg15
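// Each s_i is a product over the challenges: x_j when bit (len(xs)-j-1) of i is set
// (bits read MSB first), and x_j^-1 otherwise.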
// nolint
func (verifier *InnerProductVerifier) gets(xs []curves.Scalar, n int) ([]curves.Scalar, error) {
ss := make([]curves.Scalar, n)
for i := 0; i < n; i++ {
si := verifier.curve.Scalar.One()
for j, xj := range xs {
if i>>(len(xs)-j-1)&0x01 == 1 {
si = si.Mul(xj)
} else {
xjInverse, err := xj.Invert()
if err != nil {
return nil, errors.Wrap(err, "gets invert")
}
si = si.Mul(xjInverse)
}
}
ss[i] = si
}
return ss, nil
}
// getsNew calculates the vector s of values used for verification
// It is functionally equivalent to gets(), but runs in O(n) rather than O(n log n)
// The algorithm performs a single inversion of the product of all xs, then repeatedly multiplies
// by squares of the x elements to derive all s values, minimizing the inversions required.
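// Concretely: ss[0] = (x_0 * ... * x_(k-1))^-1, then level j fills ss[i + 2^(k-j-1)] = ss[i] * x_j^2
// for i stepping by 2^(k-j), where k = len(xs).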
func (verifier *InnerProductVerifier) getsNew(xs []curves.Scalar, n int) ([]curves.Scalar, error) {
var err error
ss := make([]curves.Scalar, n)
// The first element is the inverse of the product of all xs
ss[0] = verifier.curve.Scalar.One()
for _, xj := range xs {
ss[0] = ss[0].Mul(xj)
}
ss[0], err = ss[0].Invert()
if err != nil {
return nil, errors.Wrap(err, "ipp gets inv ss0")
}
for j, xj := range xs {
xjSquared := xj.Square()
for i := 0; i < n; i += 1 << (len(xs) - j) {
ss[i+1<<(len(xs)-j-1)] = ss[i].Mul(xjSquared)
}
}
return ss, nil
}

View File

@ -0,0 +1,79 @@
package bulletproof
import (
crand "crypto/rand"
"testing"
"github.com/gtank/merlin"
"github.com/stretchr/testify/require"
"github.com/onsonr/sonr/crypto/core/curves"
)
func TestIPPVerifyHappyPath(t *testing.T) {
curve := curves.ED25519()
vecLength := 256
prover, err := NewInnerProductProver(vecLength, []byte("test"), *curve)
require.NoError(t, err)
a := randScalarVec(vecLength, *curve)
b := randScalarVec(vecLength, *curve)
u := curve.Point.Random(crand.Reader)
transcriptProver := merlin.NewTranscript("test")
proof, err := prover.Prove(a, b, u, transcriptProver)
require.NoError(t, err)
verifier, err := NewInnerProductVerifier(vecLength, []byte("test"), *curve)
require.NoError(t, err)
capP, err := prover.getP(a, b, u)
require.NoError(t, err)
transcriptVerifier := merlin.NewTranscript("test")
verified, err := verifier.Verify(capP, u, proof, transcriptVerifier)
require.NoError(t, err)
require.True(t, verified)
}
func BenchmarkIPPVerification(bench *testing.B) {
curve := curves.ED25519()
vecLength := 1024
prover, _ := NewInnerProductProver(vecLength, []byte("test"), *curve)
a := randScalarVec(vecLength, *curve)
b := randScalarVec(vecLength, *curve)
u := curve.Point.Random(crand.Reader)
transcriptProver := merlin.NewTranscript("test")
proof, _ := prover.Prove(a, b, u, transcriptProver)
verifier, _ := NewInnerProductVerifier(vecLength, []byte("test"), *curve)
capP, _ := prover.getP(a, b, u)
// Time only the verification step; each iteration needs a fresh transcript
bench.ResetTimer()
for i := 0; i < bench.N; i++ {
transcriptVerifier := merlin.NewTranscript("test")
verified, _ := verifier.Verify(capP, u, proof, transcriptVerifier)
require.True(bench, verified)
}
}
func TestIPPVerifyInvalidProof(t *testing.T) {
curve := curves.ED25519()
vecLength := 64
prover, err := NewInnerProductProver(vecLength, []byte("test"), *curve)
require.NoError(t, err)
a := randScalarVec(vecLength, *curve)
b := randScalarVec(vecLength, *curve)
u := curve.Point.Random(crand.Reader)
aPrime := randScalarVec(64, *curve)
bPrime := randScalarVec(64, *curve)
uPrime := curve.Point.Random(crand.Reader)
transcriptProver := merlin.NewTranscript("test")
proofPrime, err := prover.Prove(aPrime, bPrime, uPrime, transcriptProver)
require.NoError(t, err)
verifier, err := NewInnerProductVerifier(vecLength, []byte("test"), *curve)
require.NoError(t, err)
capP, err := prover.getP(a, b, u)
require.NoError(t, err)
transcriptVerifier := merlin.NewTranscript("test")
// Verify against capP and u values that do not correspond to the proof
verified, err := verifier.Verify(capP, u, proofPrime, transcriptVerifier)
require.NoError(t, err)
require.False(t, verified)
}

View File

@ -0,0 +1,348 @@
package bulletproof
import (
crand "crypto/rand"
"github.com/gtank/merlin"
"github.com/pkg/errors"
"github.com/onsonr/sonr/crypto/core/curves"
)
// BatchProve proves that each scalar in the list v lies in the range [0, 2^n]
// It implements the aggregated logarithmic proofs defined on pg21.
// Instead of taking a single value and a single blinding factor, BatchProve takes a list of values
// and a list of blinding factors.
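// v and gamma are assumed to have the same length; gamma[i] blinds v[i].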
func (prover *RangeProver) BatchProve(v, gamma []curves.Scalar, n int, proofGenerators RangeProofGenerators, transcript *merlin.Transcript) (*RangeProof, error) {
// Define nm as the total number of bits required for the m secrets, i.e. m*n
m := len(v)
nm := n * m
// nm must be less than or equal to the number of generators generated
if nm > len(prover.generators.G) {
return nil, errors.New("ipp vector length must be less than or equal to maxVectorLength")
}
// In case where nm is less than number of generators precomputed by prover, trim to length
proofG := prover.generators.G[0:nm]
proofH := prover.generators.H[0:nm]
// Check that each elem in v is in range [0, 2^n]
for _, vi := range v {
checkedRange := checkRange(vi, n)
if checkedRange != nil {
return nil, checkedRange
}
}
// L40 on pg19
aL, err := getaLBatched(v, n, prover.curve)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
onenm := get1nVector(nm, prover.curve)
// L41 on pg19
aR, err := subtractPairwiseScalarVectors(aL, onenm)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
alpha := prover.curve.Scalar.Random(crand.Reader)
// Calc A (L44, pg19)
halpha := proofGenerators.h.Mul(alpha)
gaL := prover.curve.Point.SumOfProducts(proofG, aL)
haR := prover.curve.Point.SumOfProducts(proofH, aR)
capA := halpha.Add(gaL).Add(haR)
// L45, 46, pg19
sL := getBlindingVector(nm, prover.curve)
sR := getBlindingVector(nm, prover.curve)
rho := prover.curve.Scalar.Random(crand.Reader)
// Calc S (L47, pg19)
hrho := proofGenerators.h.Mul(rho)
gsL := prover.curve.Point.SumOfProducts(proofG, sL)
hsR := prover.curve.Point.SumOfProducts(proofH, sR)
capS := hrho.Add(gsL).Add(hsR)
// Fiat Shamir for y,z (L49, pg19)
capV := getcapVBatched(v, gamma, proofGenerators.g, proofGenerators.h)
y, z, err := calcyzBatched(capV, capA, capS, transcript, prover.curve)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
// Calc t_1, t_2
// See the l(X), r(X) equations on pg21
// Use l(X)'s and r(X)'s constant and linear terms to derive t_1 and t_2
// (a_L - z*1^nm)
zonenm := multiplyScalarToScalarVector(z, onenm)
constantTerml, err := subtractPairwiseScalarVectors(aL, zonenm)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
linearTerml := sL
// zSum term, see equation 71 on pg21
zSum := getSumTermrXBatched(z, n, len(v), prover.curve)
// a_r + z*1^nm
aRPluszonenm, err := addPairwiseScalarVectors(aR, zonenm)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
ynm := getknVector(y, nm, prover.curve)
hadamard, err := multiplyPairwiseScalarVectors(ynm, aRPluszonenm)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
constantTermr, err := addPairwiseScalarVectors(hadamard, zSum)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
linearTermr, err := multiplyPairwiseScalarVectors(ynm, sR)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
// t_1 (as the linear coefficient) is the sum of the dot products of l(X)'s linear term dot r(X)'s constant term
// and r(X)'s linear term dot l(X)'s constant term
t1FirstTerm, err := innerProduct(linearTerml, constantTermr)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
t1SecondTerm, err := innerProduct(linearTermr, constantTerml)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
t1 := t1FirstTerm.Add(t1SecondTerm)
// t_2 (as the quadratic coefficient) is the dot product of l(X)'s and r(X)'s linear terms
t2, err := innerProduct(linearTerml, linearTermr)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
// L52, pg20
tau1 := prover.curve.Scalar.Random(crand.Reader)
tau2 := prover.curve.Scalar.Random(crand.Reader)
// T_1, T_2 (L53, pg20)
capT1 := proofGenerators.g.Mul(t1).Add(proofGenerators.h.Mul(tau1))
capT2 := proofGenerators.g.Mul(t2).Add(proofGenerators.h.Mul(tau2))
// Fiat shamir for x (L55, pg20)
x, err := calcx(capT1, capT2, transcript, prover.curve)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
// Calc l
// Instead of using the expression in the line, evaluate l() at x
sLx := multiplyScalarToScalarVector(x, linearTerml)
l, err := addPairwiseScalarVectors(constantTerml, sLx)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
// Calc r
// Instead of using the expression in the line, evaluate r() at x
ynsRx := multiplyScalarToScalarVector(x, linearTermr)
r, err := addPairwiseScalarVectors(constantTermr, ynsRx)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
// Calc t hat
// For efficiency, instead of calculating the dot product, evaluate t() at x
zm := getknVector(z, m, prover.curve)
zsquarezm := multiplyScalarToScalarVector(z.Square(), zm)
sumv := prover.curve.Scalar.Zero()
for i := 0; i < m; i++ {
elem := zsquarezm[i].Mul(v[i])
sumv = sumv.Add(elem)
}
deltayzBatched, err := deltayzBatched(y, z, n, m, prover.curve)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
t0 := sumv.Add(deltayzBatched)
tLinear := t1.Mul(x)
tQuadratic := t2.Mul(x.Square())
tHat := t0.Add(tLinear).Add(tQuadratic)
// Calc tau_x (L61, pg20)
tau2xsquare := tau2.Mul(x.Square())
tau1x := tau1.Mul(x)
zsum := prover.curve.Scalar.Zero()
zExp := z.Clone()
for j := 1; j < m+1; j++ {
zExp = zExp.Mul(z)
zsum = zsum.Add(zExp.Mul(gamma[j-1]))
}
taux := tau2xsquare.Add(tau1x).Add(zsum)
// Calc mu (L62, pg20)
mu := alpha.Add(rho.Mul(x))
// Calc IPP (See section 4.2)
hPrime, err := gethPrime(proofH, y, prover.curve)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
// P is redefined in batched case, see bottom equation on pg21.
capPhmu := getPhmuBatched(proofG, hPrime, proofGenerators.h, capA, capS, x, y, z, mu, n, m, prover.curve)
wBytes := transcript.ExtractBytes([]byte("getw"), 64)
w, err := prover.curve.NewScalar().SetBytesWide(wBytes)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
ipp, err := prover.ippProver.rangeToIPP(proofG, hPrime, l, r, tHat, capPhmu, proofGenerators.u.Mul(w), transcript)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
out := &RangeProof{
capA: capA,
capS: capS,
capT1: capT1,
capT2: capT2,
taux: taux,
mu: mu,
tHat: tHat,
ipp: ipp,
curve: &prover.curve,
}
return out, nil
}
// See final term of L71 on pg 21
// Sigma_{j=1}^{m} z^{1+j} * (0^{(j-1)*n} || 2^{n} || 0^{(m-j)*n}).
func getSumTermrXBatched(z curves.Scalar, n, m int, curve curves.Curve) []curves.Scalar {
twoN := get2nVector(n, curve)
var out []curves.Scalar
// zExp is multiplied by z before each use, so the final power is z^(m+1)
zExp := z.Clone()
for j := 0; j < m; j++ {
zExp = zExp.Mul(z)
elem := multiplyScalarToScalarVector(zExp, twoN)
out = append(out, elem...)
}
return out
}
func getcapVBatched(v, gamma []curves.Scalar, g, h curves.Point) []curves.Point {
out := make([]curves.Point, len(v))
for i, vi := range v {
out[i] = getcapV(vi, gamma[i], g, h)
}
return out
}
func getaLBatched(v []curves.Scalar, n int, curve curves.Curve) ([]curves.Scalar, error) {
var aL []curves.Scalar
for _, vi := range v {
aLi, err := getaL(vi, n, curve)
if err != nil {
return nil, err
}
aL = append(aL, aLi...)
}
return aL, nil
}
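// calcyzBatched mirrors calcyz, but first appends every commitment in capV to the transcript
// before reading y and z.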
func calcyzBatched(capV []curves.Point, capA, capS curves.Point, transcript *merlin.Transcript, curve curves.Curve) (curves.Scalar, curves.Scalar, error) {
// Add the A,S values to transcript
for _, capVi := range capV {
transcript.AppendMessage([]byte("addV"), capVi.ToAffineUncompressed())
}
transcript.AppendMessage([]byte("addcapA"), capA.ToAffineUncompressed())
transcript.AppendMessage([]byte("addcapS"), capS.ToAffineUncompressed())
// Read 64 bytes from the transcript twice, once each for the scalars y and z
yBytes := transcript.ExtractBytes([]byte("gety"), 64)
y, err := curve.NewScalar().SetBytesWide(yBytes)
if err != nil {
return nil, nil, errors.Wrap(err, "calcyz NewScalar SetBytesWide")
}
zBytes := transcript.ExtractBytes([]byte("getz"), 64)
z, err := curve.NewScalar().SetBytesWide(zBytes)
if err != nil {
return nil, nil, errors.Wrap(err, "calcyz NewScalar SetBytesWide")
}
return y, z, nil
}
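// deltayzBatched computes delta(y, z) as redefined for the batched case on pg21:
// delta(y, z) = (z - z^2)*<1^nm, y^nm> - sum_{j=1}^{m} z^(2+j)*<1^n, 2^n>.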
func deltayzBatched(y, z curves.Scalar, n, m int, curve curves.Curve) (curves.Scalar, error) {
// z - z^2
zMinuszsquare := z.Sub(z.Square())
// 1^(n*m)
onenm := get1nVector(n*m, curve)
// <1^nm, y^nm>
onenmdotynm, err := innerProduct(onenm, getknVector(y, n*m, curve))
if err != nil {
return nil, errors.Wrap(err, "deltayz")
}
// (z - z^2)*<1^nm, y^nm>
termFirst := zMinuszsquare.Mul(onenmdotynm)
// <1^n, 2^n>
onendottwon, err := innerProduct(get1nVector(n, curve), get2nVector(n, curve))
if err != nil {
return nil, errors.Wrap(err, "deltayz")
}
termSecond := curve.Scalar.Zero()
zExp := z.Square()
for j := 1; j < m+1; j++ {
zExp = zExp.Mul(z)
elem := zExp.Mul(onendottwon)
termSecond = termSecond.Add(elem)
}
// (z - z^2)*<1^nm, y^nm> - sum_{j=1}^{m} z^(2+j)*<1^n, 2^n>
out := termFirst.Sub(termSecond)
return out, nil
}
// Bottom equation on pg21.
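// It computes P = capA * capS^x * g^(-z*1^nm) * h'^(z*y^nm + sum_j z^(1+j)*2^n in slot j),
// then returns P * h^(-mu).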
func getPhmuBatched(proofG, proofHPrime []curves.Point, h, capA, capS curves.Point, x, y, z, mu curves.Scalar, n, m int, curve curves.Curve) curves.Point {
twoN := get2nVector(n, curve)
// h'^(z*y^n + z^2*2^n)
lastElem := curve.NewIdentityPoint()
zExp := z.Clone()
for j := 1; j < m+1; j++ {
// Get subvector of h
hSubvector := proofHPrime[(j-1)*n : j*n]
// z^(j+1)
zExp = zExp.Mul(z)
exp := multiplyScalarToScalarVector(zExp, twoN)
// Final elem
elem := curve.Point.SumOfProducts(hSubvector, exp)
lastElem = lastElem.Add(elem)
}
zynm := multiplyScalarToScalarVector(z, getknVector(y, n*m, curve))
hPrimezynm := curve.Point.SumOfProducts(proofHPrime, zynm)
lastElem = lastElem.Add(hPrimezynm)
// S^x
capSx := capS.Mul(x)
// g^(-z*1^nm), computed as a multiexponentiation with exponent vector -z*1^nm
onenm := get1nVector(n*m, curve)
zNeg := z.Neg()
zinvonen := multiplyScalarToScalarVector(zNeg, onenm)
zgdotonen := curve.Point.SumOfProducts(proofG, zinvonen)
// L66 on pg20
P := capA.Add(capSx).Add(zgdotonen).Add(lastElem)
hmu := h.Mul(mu)
Phmu := P.Sub(hmu)
return Phmu
}

View File

@ -0,0 +1,102 @@
package bulletproof
import (
crand "crypto/rand"
"testing"
"github.com/gtank/merlin"
"github.com/stretchr/testify/require"
"github.com/onsonr/sonr/crypto/core/curves"
)
func TestRangeBatchProverHappyPath(t *testing.T) {
curve := curves.ED25519()
n := 256
prover, err := NewRangeProver(n*4, []byte("rangeDomain"), []byte("ippDomain"), *curve)
require.NoError(t, err)
v1 := curve.Scalar.Random(crand.Reader)
v2 := curve.Scalar.Random(crand.Reader)
v3 := curve.Scalar.Random(crand.Reader)
v4 := curve.Scalar.Random(crand.Reader)
v := []curves.Scalar{v1, v2, v3, v4}
gamma1 := curve.Scalar.Random(crand.Reader)
gamma2 := curve.Scalar.Random(crand.Reader)
gamma3 := curve.Scalar.Random(crand.Reader)
gamma4 := curve.Scalar.Random(crand.Reader)
gamma := []curves.Scalar{gamma1, gamma2, gamma3, gamma4}
g := curve.Point.Random(crand.Reader)
h := curve.Point.Random(crand.Reader)
u := curve.Point.Random(crand.Reader)
proofGenerators := RangeProofGenerators{
g: g,
h: h,
u: u,
}
transcript := merlin.NewTranscript("test")
proof, err := prover.BatchProve(v, gamma, n, proofGenerators, transcript)
require.NoError(t, err)
require.NotNil(t, proof)
require.Equal(t, 10, len(proof.ipp.capLs))
require.Equal(t, 10, len(proof.ipp.capRs))
}
func TestGetaLBatched(t *testing.T) {
curve := curves.ED25519()
v1 := curve.Scalar.Random(crand.Reader)
v2 := curve.Scalar.Random(crand.Reader)
v3 := curve.Scalar.Random(crand.Reader)
v4 := curve.Scalar.Random(crand.Reader)
v := []curves.Scalar{v1, v2, v3, v4}
aL, err := getaLBatched(v, 256, *curve)
require.NoError(t, err)
twoN := get2nVector(256, *curve)
for i := 1; i < len(v)+1; i++ {
vec := aL[(i-1)*256 : i*256]
product, err := innerProduct(vec, twoN)
require.NoError(t, err)
require.Zero(t, product.Cmp(v[i-1]))
}
}
func TestRangeBatchProverMarshal(t *testing.T) {
curve := curves.ED25519()
n := 256
prover, err := NewRangeProver(n*4, []byte("rangeDomain"), []byte("ippDomain"), *curve)
require.NoError(t, err)
v1 := curve.Scalar.Random(crand.Reader)
v2 := curve.Scalar.Random(crand.Reader)
v3 := curve.Scalar.Random(crand.Reader)
v4 := curve.Scalar.Random(crand.Reader)
v := []curves.Scalar{v1, v2, v3, v4}
gamma1 := curve.Scalar.Random(crand.Reader)
gamma2 := curve.Scalar.Random(crand.Reader)
gamma3 := curve.Scalar.Random(crand.Reader)
gamma4 := curve.Scalar.Random(crand.Reader)
gamma := []curves.Scalar{gamma1, gamma2, gamma3, gamma4}
g := curve.Point.Random(crand.Reader)
h := curve.Point.Random(crand.Reader)
u := curve.Point.Random(crand.Reader)
proofGenerators := RangeProofGenerators{
g: g,
h: h,
u: u,
}
transcript := merlin.NewTranscript("test")
proof, err := prover.BatchProve(v, gamma, n, proofGenerators, transcript)
require.NoError(t, err)
proofMarshaled := proof.MarshalBinary()
proofPrime := NewRangeProof(curve)
err = proofPrime.UnmarshalBinary(proofMarshaled)
require.NoError(t, err)
require.True(t, proof.capA.Equal(proofPrime.capA))
require.True(t, proof.capS.Equal(proofPrime.capS))
require.True(t, proof.capT1.Equal(proofPrime.capT1))
require.True(t, proof.capT2.Equal(proofPrime.capT2))
require.Zero(t, proof.taux.Cmp(proofPrime.taux))
require.Zero(t, proof.mu.Cmp(proofPrime.mu))
require.Zero(t, proof.tHat.Cmp(proofPrime.tHat))
}

View File

@ -0,0 +1,91 @@
package bulletproof
import (
"github.com/gtank/merlin"
"github.com/pkg/errors"
"github.com/onsonr/sonr/crypto/core/curves"
)
// VerifyBatched verifies a given batched range proof.
// Compared to the unbatched single range proof case, it takes a list capV of commitments
// to the secret values instead of a single commitment.
func (verifier *RangeVerifier) VerifyBatched(proof *RangeProof, capV []curves.Point, proofGenerators RangeProofGenerators, n int, transcript *merlin.Transcript) (bool, error) {
// Define nm as the total number of bits required for the m secrets, i.e. m*n
m := len(capV)
nm := n * m
// nm must not exceed the number of precomputed generators
if nm > len(verifier.generators.G) {
return false, errors.New("ipp vector length must not exceed maxVectorLength")
}
// In case nm is less than the number of generators precomputed by the verifier, trim to length
proofG := verifier.generators.G[0:nm]
proofH := verifier.generators.H[0:nm]
// Calc y,z,x from Fiat Shamir heuristic
y, z, err := calcyzBatched(capV, proof.capA, proof.capS, transcript, verifier.curve)
if err != nil {
return false, errors.Wrap(err, "rangeproof verify")
}
x, err := calcx(proof.capT1, proof.capT2, transcript, verifier.curve)
if err != nil {
return false, errors.Wrap(err, "rangeproof verify")
}
wBytes := transcript.ExtractBytes([]byte("getw"), 64)
w, err := verifier.curve.NewScalar().SetBytesWide(wBytes)
if err != nil {
return false, errors.Wrap(err, "rangeproof verify")
}
// Calc delta(y,z), redefined for batched case on pg21
deltayzBatched, err := deltayzBatched(y, z, n, m, verifier.curve)
if err != nil {
return false, errors.Wrap(err, "rangeproof verify")
}
// Check tHat: L65, pg20
// See equation 72 on pg21
tHatIsValid := verifier.checktHatBatched(proof, capV, proofGenerators.g, proofGenerators.h, deltayzBatched, x, z, m)
if !tHatIsValid {
return false, errors.New("rangeproof verify tHat is invalid")
}
// Verify IPP
hPrime, err := gethPrime(proofH, y, verifier.curve)
if err != nil {
return false, errors.Wrap(err, "rangeproof verify")
}
capPhmu := getPhmuBatched(proofG, hPrime, proofGenerators.h, proof.capA, proof.capS, x, y, z, proof.mu, n, m, verifier.curve)
ippVerified, err := verifier.ippVerifier.VerifyFromRangeProof(proofG, hPrime, capPhmu, proofGenerators.u.Mul(w), proof.tHat, proof.ipp, transcript)
if err != nil {
return false, errors.Wrap(err, "rangeproof verify")
}
return ippVerified, nil
}
// L65, pg20.
func (verifier *RangeVerifier) checktHatBatched(proof *RangeProof, capV []curves.Point, g, h curves.Point, deltayz, x, z curves.Scalar, m int) bool {
// g^tHat * h^tau_x
gtHat := g.Mul(proof.tHat)
htaux := h.Mul(proof.taux)
lhs := gtHat.Add(htaux)
// V^(z^2 * z^m) * g^delta(y,z) * T_1^x * T_2^(x^2), the batched form of the L65 right hand side
zm := getknVector(z, m, verifier.curve)
zsquarezm := multiplyScalarToScalarVector(z.Square(), zm)
capVzsquaretwom := verifier.curve.Point.SumOfProducts(capV, zsquarezm)
gdeltayz := g.Mul(deltayz)
capTau1x := proof.capT1.Mul(x)
capTau2xsquare := proof.capT2.Mul(x.Square())
rhs := capVzsquaretwom.Add(gdeltayz).Add(capTau1x).Add(capTau2xsquare)
// Compare lhs =? rhs
return lhs.Equal(rhs)
}

View File

@ -0,0 +1,148 @@
package bulletproof
import (
crand "crypto/rand"
"testing"
"github.com/gtank/merlin"
"github.com/stretchr/testify/require"
"github.com/onsonr/sonr/crypto/core/curves"
)
func TestRangeBatchVerifyHappyPath(t *testing.T) {
curve := curves.ED25519()
n := 256
prover, err := NewRangeProver(n*4, []byte("rangeDomain"), []byte("ippDomain"), *curve)
require.NoError(t, err)
v1 := curve.Scalar.Random(crand.Reader)
v2 := curve.Scalar.Random(crand.Reader)
v3 := curve.Scalar.Random(crand.Reader)
v4 := curve.Scalar.Random(crand.Reader)
v := []curves.Scalar{v1, v2, v3, v4}
gamma1 := curve.Scalar.Random(crand.Reader)
gamma2 := curve.Scalar.Random(crand.Reader)
gamma3 := curve.Scalar.Random(crand.Reader)
gamma4 := curve.Scalar.Random(crand.Reader)
gamma := []curves.Scalar{gamma1, gamma2, gamma3, gamma4}
g := curve.Point.Random(crand.Reader)
h := curve.Point.Random(crand.Reader)
u := curve.Point.Random(crand.Reader)
proofGenerators := RangeProofGenerators{
g: g,
h: h,
u: u,
}
transcript := merlin.NewTranscript("test")
proof, err := prover.BatchProve(v, gamma, n, proofGenerators, transcript)
require.NoError(t, err)
verifier, err := NewRangeVerifier(n*4, []byte("rangeDomain"), []byte("ippDomain"), *curve)
require.NoError(t, err)
transcriptVerifier := merlin.NewTranscript("test")
capV := getcapVBatched(v, gamma, g, h)
verified, err := verifier.VerifyBatched(proof, capV, proofGenerators, n, transcriptVerifier)
require.NoError(t, err)
require.True(t, verified)
}
func TestRangeBatchVerifyNotInRange(t *testing.T) {
curve := curves.ED25519()
n := 2
prover, err := NewRangeProver(n*4, []byte("rangeDomain"), []byte("ippDomain"), *curve)
require.NoError(t, err)
v1 := curve.Scalar.One()
v2 := curve.Scalar.Random(crand.Reader)
v3 := curve.Scalar.Random(crand.Reader)
v4 := curve.Scalar.Random(crand.Reader)
v := []curves.Scalar{v1, v2, v3, v4}
gamma1 := curve.Scalar.Random(crand.Reader)
gamma2 := curve.Scalar.Random(crand.Reader)
gamma3 := curve.Scalar.Random(crand.Reader)
gamma4 := curve.Scalar.Random(crand.Reader)
gamma := []curves.Scalar{gamma1, gamma2, gamma3, gamma4}
g := curve.Point.Random(crand.Reader)
h := curve.Point.Random(crand.Reader)
u := curve.Point.Random(crand.Reader)
proofGenerators := RangeProofGenerators{
g: g,
h: h,
u: u,
}
transcript := merlin.NewTranscript("test")
_, err = prover.BatchProve(v, gamma, n, proofGenerators, transcript)
require.Error(t, err)
}
func TestRangeBatchVerifyNonRandom(t *testing.T) {
curve := curves.ED25519()
n := 2
prover, err := NewRangeProver(n*4, []byte("rangeDomain"), []byte("ippDomain"), *curve)
require.NoError(t, err)
v1 := curve.Scalar.One()
v2 := curve.Scalar.One()
v3 := curve.Scalar.One()
v4 := curve.Scalar.One()
v := []curves.Scalar{v1, v2, v3, v4}
gamma1 := curve.Scalar.Random(crand.Reader)
gamma2 := curve.Scalar.Random(crand.Reader)
gamma3 := curve.Scalar.Random(crand.Reader)
gamma4 := curve.Scalar.Random(crand.Reader)
gamma := []curves.Scalar{gamma1, gamma2, gamma3, gamma4}
g := curve.Point.Random(crand.Reader)
h := curve.Point.Random(crand.Reader)
u := curve.Point.Random(crand.Reader)
proofGenerators := RangeProofGenerators{
g: g,
h: h,
u: u,
}
transcript := merlin.NewTranscript("test")
proof, err := prover.BatchProve(v, gamma, n, proofGenerators, transcript)
require.NoError(t, err)
verifier, err := NewRangeVerifier(n*4, []byte("rangeDomain"), []byte("ippDomain"), *curve)
require.NoError(t, err)
transcriptVerifier := merlin.NewTranscript("test")
capV := getcapVBatched(v, gamma, g, h)
verified, err := verifier.VerifyBatched(proof, capV, proofGenerators, n, transcriptVerifier)
require.NoError(t, err)
require.True(t, verified)
}
func TestRangeBatchVerifyInvalid(t *testing.T) {
curve := curves.ED25519()
n := 2
prover, err := NewRangeProver(n*4, []byte("rangeDomain"), []byte("ippDomain"), *curve)
require.NoError(t, err)
v1 := curve.Scalar.One()
v2 := curve.Scalar.One()
v3 := curve.Scalar.One()
v4 := curve.Scalar.One()
v := []curves.Scalar{v1, v2, v3, v4}
gamma1 := curve.Scalar.Random(crand.Reader)
gamma2 := curve.Scalar.Random(crand.Reader)
gamma3 := curve.Scalar.Random(crand.Reader)
gamma4 := curve.Scalar.Random(crand.Reader)
gamma := []curves.Scalar{gamma1, gamma2, gamma3, gamma4}
g := curve.Point.Random(crand.Reader)
h := curve.Point.Random(crand.Reader)
u := curve.Point.Random(crand.Reader)
proofGenerators := RangeProofGenerators{
g: g,
h: h,
u: u,
}
transcript := merlin.NewTranscript("test")
proof, err := prover.BatchProve(v, gamma, n, proofGenerators, transcript)
require.NoError(t, err)
verifier, err := NewRangeVerifier(n*4, []byte("rangeDomain"), []byte("ippDomain"), *curve)
require.NoError(t, err)
transcriptVerifier := merlin.NewTranscript("test")
capV := getcapVBatched(v, gamma, g, h)
capV[0] = curve.Point.Random(crand.Reader)
verified, err := verifier.VerifyBatched(proof, capV, proofGenerators, n, transcriptVerifier)
require.Error(t, err)
require.False(t, verified)
}

View File

@ -0,0 +1,476 @@
//
// Copyright Coinbase, Inc. All Rights Reserved.
//
// SPDX-License-Identifier: Apache-2.0
//
// Package bulletproof implements the zero knowledge protocol bulletproofs as defined in https://eprint.iacr.org/2017/1066.pdf
package bulletproof
import (
crand "crypto/rand"
"math/big"
"github.com/gtank/merlin"
"github.com/pkg/errors"
"github.com/onsonr/sonr/crypto/core/curves"
)
// RangeProver is the struct used to create RangeProofs
// It specifies which curve to use and holds precomputed generators
// See NewRangeProver() for prover initialization.
type RangeProver struct {
curve curves.Curve
generators *ippGenerators
ippProver *InnerProductProver
}
// RangeProof is the struct used to hold a range proof
// capA is a commitment to a_L and a_R using randomness alpha
// capS is a commitment to s_L and s_R using randomness rho
// capT1, capT2 are commitments to t1, t2 respectively, using randomness tau_1, tau_2
// tHat is the polynomial t(X) defined on page 19, evaluated at the challenge x
// taux is the blinding factor for tHat
// ipp is the inner product proof used for compacting the transfer of l,r (See 4.2 on pg20).
type RangeProof struct {
capA, capS, capT1, capT2 curves.Point
taux, mu, tHat curves.Scalar
ipp *InnerProductProof
curve *curves.Curve
}
type RangeProofGenerators struct {
g, h, u curves.Point
}
// NewRangeProver initializes a new prover
// It uses the specified domain to generate generators for vectors of at most maxVectorLength
// A prover can be used to construct range proofs for vectors of length less than or equal to maxVectorLength
// A prover is defined by an explicit curve.
func NewRangeProver(maxVectorLength int, rangeDomain, ippDomain []byte, curve curves.Curve) (*RangeProver, error) {
generators, err := getGeneratorPoints(maxVectorLength, rangeDomain, curve)
if err != nil {
return nil, errors.Wrap(err, "range NewRangeProver")
}
ippProver, err := NewInnerProductProver(maxVectorLength, ippDomain, curve)
if err != nil {
return nil, errors.Wrap(err, "range NewRangeProver")
}
return &RangeProver{curve: curve, generators: generators, ippProver: ippProver}, nil
}
// NewRangeProof initializes a new RangeProof for a specified curve
// This should be used in tandem with UnmarshalBinary() to convert a marshaled proof into the struct.
func NewRangeProof(curve *curves.Curve) *RangeProof {
out := RangeProof{
capA: nil,
capS: nil,
capT1: nil,
capT2: nil,
taux: nil,
mu: nil,
tHat: nil,
ipp: NewInnerProductProof(curve),
curve: curve,
}
return &out
}
// Prove uses the range prover to prove that some value v is within the range [0, 2^n]
// It implements the protocol defined on pgs 19,20 in https://eprint.iacr.org/2017/1066.pdf
// v is the value of which to prove the range
// n is the power that specifies the upper bound of the range, i.e. 2^n
// gamma is a scalar used as a blinding factor
// g, h, u are unique points used as generators for the blinding factor
// transcript is a merlin transcript to be used for the fiat shamir heuristic.
func (prover *RangeProver) Prove(v, gamma curves.Scalar, n int, proofGenerators RangeProofGenerators, transcript *merlin.Transcript) (*RangeProof, error) {
// n must be less than or equal to the number of generators generated
if n > len(prover.generators.G) {
return nil, errors.New("ipp vector length must be less than or equal to maxVectorLength")
}
// In case n is less than the number of generators precomputed by the prover, trim to length
proofG := prover.generators.G[0:n]
proofH := prover.generators.H[0:n]
// Check that v is in range [0, 2^n]
if bigZero := big.NewInt(0); v.BigInt().Cmp(bigZero) == -1 {
return nil, errors.New("v is less than 0")
}
bigTwo := big.NewInt(2)
if n < 0 {
return nil, errors.New("n cannot be less than 0")
}
bigN := big.NewInt(int64(n))
var bigTwoToN big.Int
bigTwoToN.Exp(bigTwo, bigN, nil)
if v.BigInt().Cmp(&bigTwoToN) == 1 {
return nil, errors.New("v is greater than 2^n")
}
// L40 on pg19
aL, err := getaL(v, n, prover.curve)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
onen := get1nVector(n, prover.curve)
// L41 on pg19
aR, err := subtractPairwiseScalarVectors(aL, onen)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
alpha := prover.curve.Scalar.Random(crand.Reader)
// Calc A (L44, pg19)
halpha := proofGenerators.h.Mul(alpha)
gaL := prover.curve.Point.SumOfProducts(proofG, aL)
haR := prover.curve.Point.SumOfProducts(proofH, aR)
capA := halpha.Add(gaL).Add(haR)
// L45, 46, pg19
sL := getBlindingVector(n, prover.curve)
sR := getBlindingVector(n, prover.curve)
rho := prover.curve.Scalar.Random(crand.Reader)
// Calc S (L47, pg19)
hrho := proofGenerators.h.Mul(rho)
gsL := prover.curve.Point.SumOfProducts(proofG, sL)
hsR := prover.curve.Point.SumOfProducts(proofH, sR)
capS := hrho.Add(gsL).Add(hsR)
// Fiat Shamir for y,z (L49, pg19)
capV := getcapV(v, gamma, proofGenerators.g, proofGenerators.h)
y, z, err := calcyz(capV, capA, capS, transcript, prover.curve)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
// Calc t_1, t_2
// See the l(X), r(X), t(X) equations on pg 19
// Use l(X)'s and r(X)'s constant and linear terms to derive t_1 and t_2
// (a_l - z*1^n)
zonen := multiplyScalarToScalarVector(z, onen)
constantTerml, err := subtractPairwiseScalarVectors(aL, zonen)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
linearTerml := sL
// z^2 * 2^N
twoN := get2nVector(n, prover.curve)
zSquareTwon := multiplyScalarToScalarVector(z.Square(), twoN)
// a_r + z*1^n
aRPluszonen, err := addPairwiseScalarVectors(aR, zonen)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
yn := getknVector(y, n, prover.curve)
hadamard, err := multiplyPairwiseScalarVectors(yn, aRPluszonen)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
constantTermr, err := addPairwiseScalarVectors(hadamard, zSquareTwon)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
linearTermr, err := multiplyPairwiseScalarVectors(yn, sR)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
// t_1 (as the linear coefficient) is the sum of the dot products of l(X)'s linear term dot r(X)'s constant term
// and r(X)'s linear term dot l(X)'s constant term
t1FirstTerm, err := innerProduct(linearTerml, constantTermr)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
t1SecondTerm, err := innerProduct(linearTermr, constantTerml)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
t1 := t1FirstTerm.Add(t1SecondTerm)
// t_2 (as the quadratic coefficient) is the dot product of l(X)'s and r(X)'s linear terms
t2, err := innerProduct(linearTerml, linearTermr)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
// L52, pg20
tau1 := prover.curve.Scalar.Random(crand.Reader)
tau2 := prover.curve.Scalar.Random(crand.Reader)
// T_1, T_2 (L53, pg20)
capT1 := proofGenerators.g.Mul(t1).Add(proofGenerators.h.Mul(tau1))
capT2 := proofGenerators.g.Mul(t2).Add(proofGenerators.h.Mul(tau2))
// Fiat shamir for x (L55, pg20)
x, err := calcx(capT1, capT2, transcript, prover.curve)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
// Calc l (L58, pg20)
// Instead of using the expression in the line, evaluate l() at x
sLx := multiplyScalarToScalarVector(x, linearTerml)
l, err := addPairwiseScalarVectors(constantTerml, sLx)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
// Calc r (L59, pg20)
// Instead of using the expression in the line, evaluate r() at x
ynsRx := multiplyScalarToScalarVector(x, linearTermr)
r, err := addPairwiseScalarVectors(constantTermr, ynsRx)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
// Calc t hat (L60, pg20)
// For efficiency, instead of calculating the dot product, evaluate t() at x
deltayz, err := deltayz(y, z, n, prover.curve)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
t0 := v.Mul(z.Square()).Add(deltayz)
tLinear := t1.Mul(x)
tQuadratic := t2.Mul(x.Square())
tHat := t0.Add(tLinear).Add(tQuadratic)
// Calc tau_x (L61, pg20)
tau2xsquare := tau2.Mul(x.Square())
tau1x := tau1.Mul(x)
zsquaregamma := z.Square().Mul(gamma)
taux := tau2xsquare.Add(tau1x).Add(zsquaregamma)
// Calc mu (L62, pg20)
mu := alpha.Add(rho.Mul(x))
// Calc IPP (See section 4.2)
hPrime, err := gethPrime(proofH, y, prover.curve)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
capPhmu, err := getPhmu(proofG, hPrime, proofGenerators.h, capA, capS, x, y, z, mu, n, prover.curve)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
wBytes := transcript.ExtractBytes([]byte("getw"), 64)
w, err := prover.curve.NewScalar().SetBytesWide(wBytes)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
ipp, err := prover.ippProver.rangeToIPP(proofG, hPrime, l, r, tHat, capPhmu, proofGenerators.u.Mul(w), transcript)
if err != nil {
return nil, errors.Wrap(err, "rangeproof prove")
}
out := &RangeProof{
capA: capA,
capS: capS,
capT1: capT1,
capT2: capT2,
taux: taux,
mu: mu,
tHat: tHat,
ipp: ipp,
curve: &prover.curve,
}
return out, nil
}
// MarshalBinary takes a range proof and marshals into bytes.
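// The layout is capA || capS || capT1 || capT2 || taux || mu || tHat || ipp,
// with points affine-compressed and scalars fixed-width.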
func (proof *RangeProof) MarshalBinary() []byte {
var out []byte
out = append(out, proof.capA.ToAffineCompressed()...)
out = append(out, proof.capS.ToAffineCompressed()...)
out = append(out, proof.capT1.ToAffineCompressed()...)
out = append(out, proof.capT2.ToAffineCompressed()...)
out = append(out, proof.taux.Bytes()...)
out = append(out, proof.mu.Bytes()...)
out = append(out, proof.tHat.Bytes()...)
out = append(out, proof.ipp.MarshalBinary()...)
return out
}
// UnmarshalBinary takes bytes of a marshaled proof and writes them into a range proof
// The range proof used should be from the output of NewRangeProof().
func (proof *RangeProof) UnmarshalBinary(data []byte) error {
scalarLen := len(proof.curve.NewScalar().Bytes())
pointLen := len(proof.curve.NewGeneratorPoint().ToAffineCompressed())
ptr := 0
// Get points
capA, err := proof.curve.Point.FromAffineCompressed(data[ptr : ptr+pointLen])
if err != nil {
return errors.New("rangeProof UnmarshalBinary FromAffineCompressed")
}
proof.capA = capA
ptr += pointLen
capS, err := proof.curve.Point.FromAffineCompressed(data[ptr : ptr+pointLen])
if err != nil {
return errors.New("rangeProof UnmarshalBinary FromAffineCompressed")
}
proof.capS = capS
ptr += pointLen
capT1, err := proof.curve.Point.FromAffineCompressed(data[ptr : ptr+pointLen])
if err != nil {
return errors.New("rangeProof UnmarshalBinary FromAffineCompressed")
}
proof.capT1 = capT1
ptr += pointLen
capT2, err := proof.curve.Point.FromAffineCompressed(data[ptr : ptr+pointLen])
if err != nil {
return errors.New("rangeProof UnmarshalBinary FromAffineCompressed")
}
proof.capT2 = capT2
ptr += pointLen
// Get scalars
taux, err := proof.curve.NewScalar().SetBytes(data[ptr : ptr+scalarLen])
if err != nil {
return errors.New("rangeProof UnmarshalBinary SetBytes")
}
proof.taux = taux
ptr += scalarLen
mu, err := proof.curve.NewScalar().SetBytes(data[ptr : ptr+scalarLen])
if err != nil {
return errors.New("rangeProof UnmarshalBinary SetBytes")
}
proof.mu = mu
ptr += scalarLen
tHat, err := proof.curve.NewScalar().SetBytes(data[ptr : ptr+scalarLen])
if err != nil {
return errors.New("rangeProof UnmarshalBinary SetBytes")
}
proof.tHat = tHat
ptr += scalarLen
// Get IPP
err = proof.ipp.UnmarshalBinary(data[ptr:])
if err != nil {
return errors.New("rangeProof UnmarshalBinary")
}
return nil
}
// checkRange validates that a scalar v is at most 2^n
// It returns an error if n is negative or if v is greater than 2^n
// Otherwise it returns nil.
func checkRange(v curves.Scalar, n int) error {
bigOne := big.NewInt(1)
if n < 0 {
return errors.New("n cannot be less than 0")
}
var bigTwoToN big.Int
bigTwoToN.Lsh(bigOne, uint(n))
if v.BigInt().Cmp(&bigTwoToN) == 1 {
return errors.New("v is greater than 2^n")
}
return nil
}
// getBlindingVector returns a vector of scalars used as blinding factors for commitments.
func getBlindingVector(length int, curve curves.Curve) []curves.Scalar {
vec := make([]curves.Scalar, length)
for i := 0; i < length; i++ {
vec[i] = curve.Scalar.Random(crand.Reader)
}
return vec
}
// getcapV returns a commitment to v using blinding factor gamma.
func getcapV(v, gamma curves.Scalar, g, h curves.Point) curves.Point {
return h.Mul(gamma).Add(g.Mul(v))
}
// getaL obtains the bit vector representation of v
// See the a_L definition towards the bottom of pg 17 of https://eprint.iacr.org/2017/1066.pdf
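// The resulting vector satisfies <a_L, 2^n> = v, i.e. a_L[i] is the coefficient of 2^i in v.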
func getaL(v curves.Scalar, n int, curve curves.Curve) ([]curves.Scalar, error) {
var err error
vBytes := v.Bytes()
zero := curve.Scalar.Zero()
one := curve.Scalar.One()
aL := make([]curves.Scalar, n)
for j := 0; j < len(aL); j++ {
aL[j] = zero
}
for i := 0; i < n; i++ {
ithBit := vBytes[i>>3] >> (i & 0x07) & 0x01
aL[i], err = cmoveScalar(zero, one, int(ithBit), curve)
if err != nil {
return nil, errors.Wrap(err, "getaL")
}
}
return aL, nil
}
// cmoveScalar provides a constant time operation that returns x if which is 0 and returns y if which is 1.
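// The byte mask -byte(which) is 0xFF when which is 1 and 0x00 when which is 0,
// so the selection is branch-free.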
func cmoveScalar(x, y curves.Scalar, which int, curve curves.Curve) (curves.Scalar, error) {
if which != 0 && which != 1 {
return nil, errors.New("cmoveScalar which must be 0 or 1")
}
mask := -byte(which)
xBytes := x.Bytes()
yBytes := y.Bytes()
for i, xByte := range xBytes {
xBytes[i] ^= (xByte ^ yBytes[i]) & mask
}
out, err := curve.NewScalar().SetBytes(xBytes)
if err != nil {
return nil, errors.Wrap(err, "cmoveScalar SetBytes")
}
return out, nil
}
// calcyz uses a merlin transcript for Fiat Shamir
// It takes the current state of the transcript and appends the newly calculated capA and capS values
// Two new scalars are then read from the transcript
// See section 4.4 pg22 of https://eprint.iacr.org/2017/1066.pdf
func calcyz(capV, capA, capS curves.Point, transcript *merlin.Transcript, curve curves.Curve) (curves.Scalar, curves.Scalar, error) {
// Add the A,S values to transcript
transcript.AppendMessage([]byte("addV"), capV.ToAffineUncompressed())
transcript.AppendMessage([]byte("addcapA"), capA.ToAffineUncompressed())
transcript.AppendMessage([]byte("addcapS"), capS.ToAffineUncompressed())
// Read 64 bytes from the transcript twice, once each for the scalars y and z
yBytes := transcript.ExtractBytes([]byte("gety"), 64)
y, err := curve.NewScalar().SetBytesWide(yBytes)
if err != nil {
return nil, nil, errors.Wrap(err, "calcyz NewScalar SetBytesWide")
}
zBytes := transcript.ExtractBytes([]byte("getz"), 64)
z, err := curve.NewScalar().SetBytesWide(zBytes)
if err != nil {
return nil, nil, errors.Wrap(err, "calcyz NewScalar SetBytesWide")
}
return y, z, nil
}
// calcx uses a merlin transcript for Fiat Shamir
// It takes the current state of the transcript and appends the newly calculated capT1 and capT2 values
// A new scalar is then read from the transcript
// See section 4.4 pg22 of https://eprint.iacr.org/2017/1066.pdf
func calcx(capT1, capT2 curves.Point, transcript *merlin.Transcript, curve curves.Curve) (curves.Scalar, error) {
// Add the Tau1,2 values to transcript
transcript.AppendMessage([]byte("addcapT1"), capT1.ToAffineUncompressed())
transcript.AppendMessage([]byte("addcapT2"), capT2.ToAffineUncompressed())
// Read 64 bytes from the transcript and set them to a scalar
outBytes := transcript.ExtractBytes([]byte("getx"), 64)
x, err := curve.NewScalar().SetBytesWide(outBytes)
if err != nil {
return nil, errors.Wrap(err, "calcx NewScalar SetBytesWide")
}
return x, nil
}
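// Illustrative sketch (not part of the original file): the Fiat-Shamir
// pattern used by calcyz and calcx above. Prover and verifier append
// identical messages to their own transcripts, so both extract the same
// challenge scalar without any interaction.
func exampleChallenge(curve curves.Curve, commitment curves.Point) (curves.Scalar, error) {
	transcript := merlin.NewTranscript("example")
	transcript.AppendMessage([]byte("addCommitment"), commitment.ToAffineUncompressed())
	challengeBytes := transcript.ExtractBytes([]byte("getChallenge"), 64)
	return curve.NewScalar().SetBytesWide(challengeBytes)
}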

View File

@ -0,0 +1,86 @@
package bulletproof
import (
crand "crypto/rand"
"testing"
"github.com/gtank/merlin"
"github.com/stretchr/testify/require"
"github.com/onsonr/sonr/crypto/core/curves"
)
func TestRangeProverHappyPath(t *testing.T) {
curve := curves.ED25519()
n := 256
prover, err := NewRangeProver(n, []byte("rangeDomain"), []byte("ippDomain"), *curve)
require.NoError(t, err)
v := curve.Scalar.Random(crand.Reader)
gamma := curve.Scalar.Random(crand.Reader)
g := curve.Point.Random(crand.Reader)
h := curve.Point.Random(crand.Reader)
u := curve.Point.Random(crand.Reader)
proofGenerators := RangeProofGenerators{
g: g,
h: h,
u: u,
}
transcript := merlin.NewTranscript("test")
proof, err := prover.Prove(v, gamma, n, proofGenerators, transcript)
require.NoError(t, err)
require.NotNil(t, proof)
require.Equal(t, 8, len(proof.ipp.capLs))
require.Equal(t, 8, len(proof.ipp.capRs))
}
func TestGetaL(t *testing.T) {
curve := curves.ED25519()
v := curve.Scalar.Random(crand.Reader)
aL, err := getaL(v, 256, *curve)
require.NoError(t, err)
twoN := get2nVector(256, *curve)
product, err := innerProduct(aL, twoN)
require.NoError(t, err)
require.Zero(t, product.Cmp(v))
}
func TestCmove(t *testing.T) {
curve := curves.ED25519()
two := curve.Scalar.One().Double()
four := two.Double()
out, err := cmoveScalar(two, four, 1, *curve)
require.NoError(t, err)
require.Zero(t, out.Cmp(four))
}
func TestRangeProverMarshal(t *testing.T) {
curve := curves.ED25519()
n := 256
prover, err := NewRangeProver(n, []byte("rangeDomain"), []byte("ippDomain"), *curve)
require.NoError(t, err)
v := curve.Scalar.Random(crand.Reader)
gamma := curve.Scalar.Random(crand.Reader)
g := curve.Point.Random(crand.Reader)
h := curve.Point.Random(crand.Reader)
u := curve.Point.Random(crand.Reader)
proofGenerators := RangeProofGenerators{
g: g,
h: h,
u: u,
}
transcript := merlin.NewTranscript("test")
proof, err := prover.Prove(v, gamma, n, proofGenerators, transcript)
require.NoError(t, err)
proofMarshaled := proof.MarshalBinary()
proofPrime := NewRangeProof(curve)
err = proofPrime.UnmarshalBinary(proofMarshaled)
require.NoError(t, err)
require.True(t, proof.capA.Equal(proofPrime.capA))
require.True(t, proof.capS.Equal(proofPrime.capS))
require.True(t, proof.capT1.Equal(proofPrime.capT1))
require.True(t, proof.capT2.Equal(proofPrime.capT2))
require.Zero(t, proof.taux.Cmp(proofPrime.taux))
require.Zero(t, proof.mu.Cmp(proofPrime.mu))
require.Zero(t, proof.tHat.Cmp(proofPrime.tHat))
}

View File

@ -0,0 +1,187 @@
package bulletproof
import (
"github.com/gtank/merlin"
"github.com/pkg/errors"
"github.com/onsonr/sonr/crypto/core/curves"
)
// RangeVerifier is the struct used to verify RangeProofs
// It specifies which curve to use and holds precomputed generators
// See NewRangeVerifier() for verifier initialization.
type RangeVerifier struct {
curve curves.Curve
generators *ippGenerators
ippVerifier *InnerProductVerifier
}
// NewRangeVerifier initializes a new verifier
// It uses the specified domain to generate generators for vectors of at most maxVectorLength
// A verifier can be used to verify range proofs for vectors of length less than or equal to maxVectorLength
// A verifier is defined by an explicit curve.
func NewRangeVerifier(maxVectorLength int, rangeDomain, ippDomain []byte, curve curves.Curve) (*RangeVerifier, error) {
generators, err := getGeneratorPoints(maxVectorLength, rangeDomain, curve)
if err != nil {
return nil, errors.Wrap(err, "range NewRangeVerifier")
}
ippVerifier, err := NewInnerProductVerifier(maxVectorLength, ippDomain, curve)
if err != nil {
return nil, errors.Wrap(err, "range NewRangeVerifier")
}
return &RangeVerifier{curve: curve, generators: generators, ippVerifier: ippVerifier}, nil
}
// Verify verifies the given range proof inputs
// It implements the checking of L65 on pg 20
// It also verifies the dot product of <l,r> using the inner product proof
// capV is a commitment to v using blinding factor gamma
// n is the power that specifies the upper bound of the range, ie. 2^n
// g, h, u are unique points used as generators for the blinding factor
// transcript is a merlin transcript to be used for the fiat shamir heuristic.
func (verifier *RangeVerifier) Verify(proof *RangeProof, capV curves.Point, proofGenerators RangeProofGenerators, n int, transcript *merlin.Transcript) (bool, error) {
// Length of vectors must not exceed the number of generators generated
if n > len(verifier.generators.G) {
return false, errors.New("ipp vector length must not exceed maxVectorLength")
}
// In case n is less than the number of generators precomputed by the prover, trim to length
proofG := verifier.generators.G[0:n]
proofH := verifier.generators.H[0:n]
// Calc y,z,x from Fiat Shamir heuristic
y, z, err := calcyz(capV, proof.capA, proof.capS, transcript, verifier.curve)
if err != nil {
return false, errors.Wrap(err, "rangeproof verify")
}
x, err := calcx(proof.capT1, proof.capT2, transcript, verifier.curve)
if err != nil {
return false, errors.Wrap(err, "rangeproof verify")
}
wBytes := transcript.ExtractBytes([]byte("getw"), 64)
w, err := verifier.curve.NewScalar().SetBytesWide(wBytes)
if err != nil {
return false, errors.Wrap(err, "rangeproof verify")
}
// Calc delta(y,z)
deltayz, err := deltayz(y, z, n, verifier.curve)
if err != nil {
return false, errors.Wrap(err, "rangeproof verify")
}
// Check tHat: L65, pg20
tHatIsValid := verifier.checktHat(proof, capV, proofGenerators.g, proofGenerators.h, deltayz, x, z)
if !tHatIsValid {
return false, errors.New("rangeproof verify tHat is invalid")
}
// Verify IPP
hPrime, err := gethPrime(proofH, y, verifier.curve)
if err != nil {
return false, errors.Wrap(err, "rangeproof verify")
}
capPhmu, err := getPhmu(proofG, hPrime, proofGenerators.h, proof.capA, proof.capS, x, y, z, proof.mu, n, verifier.curve)
if err != nil {
return false, errors.Wrap(err, "rangeproof verify")
}
ippVerified, err := verifier.ippVerifier.VerifyFromRangeProof(proofG, hPrime, capPhmu, proofGenerators.u.Mul(w), proof.tHat, proof.ipp, transcript)
if err != nil {
return false, errors.Wrap(err, "rangeproof verify")
}
return ippVerified, nil
}
// L65, pg20.
func (*RangeVerifier) checktHat(proof *RangeProof, capV, g, h curves.Point, deltayz, x, z curves.Scalar) bool {
// g^tHat * h^tau_x
gtHat := g.Mul(proof.tHat)
htaux := h.Mul(proof.taux)
lhs := gtHat.Add(htaux)
// V^z^2 * g^delta(y,z) * Tau_1^x * Tau_2^x^2
capVzsquare := capV.Mul(z.Square())
gdeltayz := g.Mul(deltayz)
capTau1x := proof.capT1.Mul(x)
capTau2xsquare := proof.capT2.Mul(x.Square())
rhs := capVzsquare.Add(gdeltayz).Add(capTau1x).Add(capTau2xsquare)
// Compare lhs =? rhs
return lhs.Equal(rhs)
}
// gethPrime calculates new h prime generators as defined in L64 on pg20.
func gethPrime(h []curves.Point, y curves.Scalar, curve curves.Curve) ([]curves.Point, error) {
hPrime := make([]curves.Point, len(h))
yInv, err := y.Invert()
if err != nil {
return nil, errors.Wrap(err, "gethPrime")
}
yInvn := getknVector(yInv, len(h), curve)
for i, hElem := range h {
hPrime[i] = hElem.Mul(yInvn[i])
}
return hPrime, nil
}
// Obtain P used for IPP verification
// See L67 on pg20
// Note: P on L66 includes the blinding factor hmu; this method removes that factor.
func getPhmu(proofG, proofHPrime []curves.Point, h, capA, capS curves.Point, x, y, z, mu curves.Scalar, n int, curve curves.Curve) (curves.Point, error) {
// h'^(z*y^n + z^2*2^n)
zyn := multiplyScalarToScalarVector(z, getknVector(y, n, curve))
zsquaretwon := multiplyScalarToScalarVector(z.Square(), get2nVector(n, curve))
elemLastExponent, err := addPairwiseScalarVectors(zyn, zsquaretwon)
if err != nil {
return nil, errors.Wrap(err, "getPhmu")
}
lastElem := curve.Point.SumOfProducts(proofHPrime, elemLastExponent)
// S^x
capSx := capS.Mul(x)
// g^-z --> -z*<1,g>
onen := get1nVector(n, curve)
zNeg := z.Neg()
zinvonen := multiplyScalarToScalarVector(zNeg, onen)
zgdotonen := curve.Point.SumOfProducts(proofG, zinvonen)
// L66 on pg20
P := capA.Add(capSx).Add(zgdotonen).Add(lastElem)
hmu := h.Mul(mu)
Phmu := P.Sub(hmu)
return Phmu, nil
}
// Delta function for delta(y,z), See (39) on pg18.
func deltayz(y, z curves.Scalar, n int, curve curves.Curve) (curves.Scalar, error) {
// z - z^2
zMinuszsquare := z.Sub(z.Square())
// 1^n
onen := get1nVector(n, curve)
// <1^n, y^n>
onendotyn, err := innerProduct(onen, getknVector(y, n, curve))
if err != nil {
return nil, errors.Wrap(err, "deltayz")
}
// (z - z^2)*<1^n, y^n>
termFirst := zMinuszsquare.Mul(onendotyn)
// <1^n, 2^n>
onendottwon, err := innerProduct(onen, get2nVector(n, curve))
if err != nil {
return nil, errors.Wrap(err, "deltayz")
}
// z^3*<1^n, 2^n>
termSecond := z.Cube().Mul(onendottwon)
// (z - z^2)*<1^n, y^n> - z^3*<1^n, 2^n>
out := termFirst.Sub(termSecond)
return out, nil
}

View File

@ -0,0 +1,87 @@
package bulletproof
import (
crand "crypto/rand"
"testing"
"github.com/gtank/merlin"
"github.com/stretchr/testify/require"
"github.com/onsonr/sonr/crypto/core/curves"
)
func TestRangeVerifyHappyPath(t *testing.T) {
curve := curves.ED25519()
n := 256
prover, err := NewRangeProver(n, []byte("rangeDomain"), []byte("ippDomain"), *curve)
require.NoError(t, err)
v := curve.Scalar.Random(crand.Reader)
gamma := curve.Scalar.Random(crand.Reader)
g := curve.Point.Random(crand.Reader)
h := curve.Point.Random(crand.Reader)
u := curve.Point.Random(crand.Reader)
proofGenerators := RangeProofGenerators{
g: g,
h: h,
u: u,
}
transcript := merlin.NewTranscript("test")
proof, err := prover.Prove(v, gamma, n, proofGenerators, transcript)
require.NoError(t, err)
verifier, err := NewRangeVerifier(n, []byte("rangeDomain"), []byte("ippDomain"), *curve)
require.NoError(t, err)
transcriptVerifier := merlin.NewTranscript("test")
capV := getcapV(v, gamma, g, h)
verified, err := verifier.Verify(proof, capV, proofGenerators, n, transcriptVerifier)
require.NoError(t, err)
require.True(t, verified)
}
func TestRangeVerifyNotInRange(t *testing.T) {
curve := curves.ED25519()
n := 2
prover, err := NewRangeProver(n, []byte("rangeDomain"), []byte("ippDomain"), *curve)
require.NoError(t, err)
v := curve.Scalar.Random(crand.Reader)
gamma := curve.Scalar.Random(crand.Reader)
g := curve.Point.Random(crand.Reader)
h := curve.Point.Random(crand.Reader)
u := curve.Point.Random(crand.Reader)
proofGenerators := RangeProofGenerators{
g: g,
h: h,
u: u,
}
transcript := merlin.NewTranscript("test")
_, err = prover.Prove(v, gamma, n, proofGenerators, transcript)
require.Error(t, err)
}
func TestRangeVerifyNonRandom(t *testing.T) {
curve := curves.ED25519()
n := 2
prover, err := NewRangeProver(n, []byte("rangeDomain"), []byte("ippDomain"), *curve)
require.NoError(t, err)
v := curve.Scalar.One()
gamma := curve.Scalar.Random(crand.Reader)
g := curve.Point.Random(crand.Reader)
h := curve.Point.Random(crand.Reader)
u := curve.Point.Random(crand.Reader)
proofGenerators := RangeProofGenerators{
g: g,
h: h,
u: u,
}
transcript := merlin.NewTranscript("test")
proof, err := prover.Prove(v, gamma, n, proofGenerators, transcript)
require.NoError(t, err)
verifier, err := NewRangeVerifier(n, []byte("rangeDomain"), []byte("ippDomain"), *curve)
require.NoError(t, err)
transcriptVerifier := merlin.NewTranscript("test")
capV := getcapV(v, gamma, g, h)
verified, err := verifier.Verify(proof, capV, proofGenerators, n, transcriptVerifier)
require.NoError(t, err)
require.True(t, verified)
}

14
crypto/core/README.md Executable file
View File

@ -0,0 +1,14 @@
---
aliases: [README]
tags: []
title: README
linter-yaml-title-alias: README
date created: Wednesday, April 17th 2024, 4:11:40 pm
date modified: Thursday, April 18th 2024, 8:19:25 am
---
## Core Package
The core package contains a set of cryptographic primitives, including but not limited to
various elliptic curves, hashes, and commitment schemes. These primitives are used internally
and can also be used independently in external projects.
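
A minimal usage sketch of the commitment scheme (assuming the exported `Commit` and `Open` helpers defined in this package's `commit.go`):

```go
package main

import (
	"fmt"

	"github.com/onsonr/sonr/crypto/core"
)

func main() {
	// Commit to a message; the returned Witness holds the message and nonce.
	commitment, witness, err := core.Commit([]byte("hello"))
	if err != nil {
		panic(err)
	}
	// Later, opening the commitment with the revealed witness verifies it.
	ok, err := core.Open(commitment, *witness)
	fmt.Println(ok, err) // true <nil>
}
```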

115
crypto/core/commit.go Executable file
View File

@ -0,0 +1,115 @@
//
// Copyright Coinbase, Inc. All Rights Reserved.
//
// SPDX-License-Identifier: Apache-2.0
//
package core
import (
"crypto/hmac"
crand "crypto/rand"
"crypto/sha256"
"crypto/subtle"
"encoding/json"
"fmt"
"hash"
)
// Size of random values and hash outputs is determined by our hash function
const Size = sha256.Size
type (
// Commitment to a given message which can be later revealed.
// This is sent to and held by a verifier until the corresponding
// witness is provided.
Commitment []byte
// Witness is sent to and opened by the verifier. This proves that
// committed message hasn't been altered by later information.
Witness struct {
Msg []byte
r [Size]byte
}
// witnessJSON is used for un/marshaling.
witnessJSON struct {
Msg []byte
R [Size]byte
}
)
// MarshalJSON encodes Witness in JSON
func (w Witness) MarshalJSON() ([]byte, error) {
return json.Marshal(witnessJSON{w.Msg, w.r})
}
// UnmarshalJSON decodes JSON into a Witness struct
func (w *Witness) UnmarshalJSON(data []byte) error {
witness := &witnessJSON{}
err := json.Unmarshal(data, witness)
if err != nil {
return err
}
w.Msg = witness.Msg
w.r = witness.R
return nil
}
// Commit to a given message. Uses SHA256 as the hash function.
func Commit(msg []byte) (Commitment, *Witness, error) {
// Initialize our decommitment
d := Witness{msg, [Size]byte{}}
// Generate a random nonce of the required length
n, err := crand.Read(d.r[:])
// Ensure no errors retrieving nonce
if err != nil {
return nil, nil, err
}
// Ensure we read all the bytes expected
if n != Size {
return nil, nil, fmt.Errorf("failed to read %v bytes from crypto/rand: received %v bytes", Size, n)
}
// Compute the commitment: HMAC(Sha2, msg, key)
c, err := ComputeHMAC(sha256.New, msg, d.r[:])
if err != nil {
return nil, nil, err
}
return c, &d, nil
}
// Open a commitment and return true if the commitment/decommitment pair are valid.
// reference: spec.§2.4: Commitment Scheme
func Open(c Commitment, d Witness) (bool, error) {
// Ensure commitment is well-formed.
if len(c) != Size {
return false, fmt.Errorf("invalid commitment, wrong length. %v != %v", len(c), Size)
}
// Re-compute the commitment: HMAC(Sha2, msg, key)
cʹ, err := ComputeHMAC(sha256.New, d.Msg, d.r[:])
if err != nil {
return false, err
}
return subtle.ConstantTimeCompare(cʹ, c) == 1, nil
}
// ComputeHMAC computes HMAC(hash_fn, msg, key)
// Takes in a hash function to use for HMAC
func ComputeHMAC(f func() hash.Hash, msg []byte, k []byte) ([]byte, error) {
if f == nil {
return nil, fmt.Errorf("hash function cannot be nil")
}
mac := hmac.New(f, k)
w, err := mac.Write(msg)
if w != len(msg) {
return nil, fmt.Errorf("bytes written to hash doesn't match expected: %v != %v", w, len(msg))
} else if err != nil {
return nil, err
}
return mac.Sum(nil), nil
}

374
crypto/core/commit_test.go Executable file
View File

@ -0,0 +1,374 @@
//
// Copyright Coinbase, Inc. All Rights Reserved.
//
// SPDX-License-Identifier: Apache-2.0
//
package core
import (
"bytes"
"encoding/json"
"testing"
"github.com/stretchr/testify/require"
)
// An entry into our test table
type entry struct {
// Input
msg []byte
// Result (actual, not expected)
commit Commitment
decommit *Witness
err error
}
// Test inputs and placeholders for results that will be filled in
// during init()
var testResults = []entry{
{[]byte("This is a test message"), nil, nil, nil},
{[]byte("short msg"), nil, nil, nil},
{[]byte("This input field is intentionally longer than the SHA256 block size to ensure that the entire message is processed"),
nil, nil, nil},
{[]byte{0xFB, 0x1A, 0x18, 0x47, 0x39, 0x3C, 0x9F, 0x45, 0x5F, 0x29, 0x4C, 0x51, 0x42, 0x30, 0xA6, 0xB9},
nil, nil, nil},
// msg = \epsilon (empty string)
{[]byte{}, nil, nil, nil},
// msg == nil
{nil, nil, nil, nil},
}
// Run our inputs through commit and record the outputs
func init() {
for i := range testResults {
entry := &testResults[i]
entry.commit, entry.decommit, entry.err = Commit(entry.msg)
}
}
// Computing commitments should never produce errors
func TestCommitWithoutErrors(t *testing.T) {
for _, entry := range testResults {
if entry.err != nil {
t.Errorf("received Commit(%v): %v", entry.msg, entry.err)
}
}
}
// Commitments should be 256b == 32B in length
func TestCommitmentsAreExpectedLength(t *testing.T) {
const expLen = 256 / 8
for _, entry := range testResults {
if len(entry.commit) != expLen {
t.Errorf("commitment is not expected length: %v != %v", len(entry.commit), expLen)
}
}
}
// Decommit cannot be nil
func TestCommmitProducesDecommit(t *testing.T) {
for _, entry := range testResults {
if entry.decommit == nil {
t.Errorf("decommit cannot be nil: Commit(%v)", entry.msg)
}
}
}
// Decommit value should contain the same message
func TestCommmitProducesDecommitWithSameMessage(t *testing.T) {
for _, entry := range testResults {
if !bytes.Equal(entry.msg, entry.decommit.Msg) {
t.Errorf("decommit.msg != msg: %v != %v", entry.msg, entry.decommit.Msg)
}
}
}
// Commitments should be unique
func TestCommmitProducesDistinctCommitments(t *testing.T) {
seen := make(map[[Size]byte]bool)
// Check the pre-computed commitments for uniqueness
for _, entry := range testResults {
// Slices cannot be used as hash keys, so we need to copy into
// an array. Oh, go-lang.
var cee [Size]byte
copy(cee[:], entry.commit)
// Ensure each commit is unique
if seen[cee] {
t.Errorf("duplicate commit found: %v", cee)
}
seen[cee] = true
}
}
// Commitments should be unique even for the same message since the nonce is
// randomly selected
func TestCommmitDistinctCommitments(t *testing.T) {
seen := make(map[[Size]byte]bool)
msg := []byte("black lives matter")
const iterations = 1000
// Check the pre-computed commitments for uniqueness
for i := 0; i < iterations; i++ {
// Compute a commitment
c, _, err := Commit(msg)
if err != nil {
t.Error(err)
}
// Slices cannot be used as hash keys, so copy into an array
var cee [Size]byte
copy(cee[:], []byte(c))
// Ensure each commit is unique
if seen[cee] {
t.Errorf("duplicate commit found: %v", cee)
}
seen[cee] = true
}
}
// Nonces must be 256b = 32B
func TestCommmitNonceIsExpectedLength(t *testing.T) {
const expLen = 256 / 8
// Check the pre-computed nonces
for _, entry := range testResults {
if len(entry.decommit.r) != expLen {
t.Errorf("nonce is not expected length: %v != %v", len(entry.decommit.r), expLen)
}
}
}
// Randomly selected nonces will be unique with overwhelming probability
func TestCommmitProducesDistinctNonces(t *testing.T) {
seen := make(map[[Size]byte]bool)
msg := []byte("black lives matter")
const iterations = 1000
// Check the pre-computed commitments for uniqueness
for i := 0; i < iterations; i++ {
// Compute a commitment
_, dee, err := Commit(msg)
if err != nil {
t.Error(err)
}
// Ensure each nonce is unique
if seen[dee.r] {
t.Errorf("duplicate nonce found: %v", dee.r)
}
seen[dee.r] = true
}
}
func TestOpenOnValidCommitments(t *testing.T) {
for _, entry := range testResults {
// Open each commitment
ok, err := Open(entry.commit, *entry.decommit)
// There should be no error
if err != nil {
t.Error(err)
}
// The commitments should verify
if !ok {
t.Errorf("commitment failed to open: %v", entry.msg)
}
}
}
func TestOpenOnModifiedNonce(t *testing.T) {
for _, entry := range testResults {
dʹ := copyWitness(entry.decommit)
// Modify the nonce
dʹ.r[0] ^= 0x40
// Open and check for failure
ok, err := Open(entry.commit, *dʹ)
assertFailedOpen(t, ok, err)
}
}
func TestOpenOnZeroPrefixNonce(t *testing.T) {
for _, entry := range testResults {
dʹ := copyWitness(entry.decommit)
// Modify the nonce
dʹ.r[0] = 0x00
dʹ.r[1] = 0x00
dʹ.r[2] = 0x00
dʹ.r[3] = 0x00
dʹ.r[4] = 0x00
dʹ.r[5] = 0x00
dʹ.r[6] = 0x00
dʹ.r[7] = 0x00
dʹ.r[8] = 0x00
dʹ.r[9] = 0x00
dʹ.r[10] = 0x00
// Open and check for failure
ok, err := Open(entry.commit, *dʹ)
assertFailedOpen(t, ok, err)
}
}
// Makes a deep copy of a Witness
func copyWitness(d *Witness) *Witness {
msg := make([]byte, len(d.Msg))
var r [Size]byte
copy(msg, d.Msg)
copy(r[:], d.r[:])
return &Witness{msg, r}
}
// Asserts that err != nil, and ok == false.
func assertFailedOpen(t *testing.T, ok bool, err error) {
// There should be no error
if err != nil {
t.Error(err)
}
// But the commitments should fail
if ok {
t.Error("commitment was verified but was expected to fail")
}
}
// An unrelated message should fail on open
func TestOpenOnNewMessage(t *testing.T) {
for _, entry := range testResults {
dʹ := copyWitness(entry.decommit)
// Use a distinct message
dʹ.Msg = []byte("no one expects the spanish inquisition")
// Open and check for failure
ok, err := Open(entry.commit, *dʹ)
assertFailedOpen(t, ok, err)
}
}
// An appended message should fail on open
func TestOpenOnAppendedMessage(t *testing.T) {
for _, entry := range testResults {
dʹ := copyWitness(entry.decommit)
// Modify the message
dʹ.Msg = []byte("no one expects the spanish inquisition")
// Open and check for failure
ok, err := Open(entry.commit, *dʹ)
assertFailedOpen(t, ok, err)
}
}
// A modified message should fail on open
func TestOpenOnModifiedMessage(t *testing.T) {
for _, entry := range testResults {
// Skip the empty string message for this test case
if len(entry.msg) == 0 {
continue
}
// Modify the message _in situ_
dʹ := copyWitness(entry.decommit)
dʹ.Msg[1] ^= 0x99
// Open and check for failure
ok, err := Open(entry.commit, *dʹ)
assertFailedOpen(t, ok, err)
}
}
// A modified commitment should fail on open
func TestOpenOnModifiedCommitment(t *testing.T) {
for _, entry := range testResults {
// Copy and then modify the commitment
cʹ := make([]byte, Size)
copy(cʹ[:], entry.commit)
cʹ[6] ^= 0x33
// Open and check for failure
ok, err := Open(cʹ, *entry.decommit)
assertFailedOpen(t, ok, err)
}
}
// An empty decommit should fail to open
func TestOpenOnDefaultDecommitObject(t *testing.T) {
for _, entry := range testResults {
// Open and check for failure
ok, err := Open(entry.commit, Witness{})
assertFailedOpen(t, ok, err)
}
}
// A nil commit should return an error
func TestOpenOnNilCommitment(t *testing.T) {
_, err := Open(nil, Witness{})
assertError(t, err)
}
// Verifies that err != nil
func assertError(t *testing.T, err error) {
if err == nil {
t.Error("expected an error but received nil")
}
}
// Ill-formed commitment should produce an error
func TestOpenOnLongCommitment(t *testing.T) {
tooLong := make([]byte, Size+1)
_, err := Open(tooLong, Witness{})
assertError(t, err)
}
// Ill-formed commitment should produce an error
func TestOpenOnShortCommitment(t *testing.T) {
tooShort := make([]byte, Size-1)
_, err := Open(tooShort, Witness{})
assertError(t, err)
}
// Tests that marshal-unmarshal is the identity function
func TestWitnessMarshalRoundTrip(t *testing.T) {
expected := &Witness{
[]byte("I'm the dude. So that's what you call me"),
[Size]byte{0xAC},
}
// Marshal and test
jsonBytes, err := json.Marshal(expected)
require.NoError(t, err)
require.NotNil(t, jsonBytes)
// Unmarshal and test
actual := &Witness{}
require.NoError(t, json.Unmarshal(jsonBytes, actual))
require.Equal(t, expected.Msg, actual.Msg)
require.Equal(t, expected.r, actual.r)
}
// Tests that marshal-unmarshal is the identity function
func TestCommitmentMarshalRoundTrip(t *testing.T) {
expected := Commitment([]byte("That or uh his-dudeness or duder or el duderino."))
// Marshal and test
jsonBytes, err := json.Marshal(expected)
require.NoError(t, err)
require.NotNil(t, jsonBytes)
// Unmarshal and test
actual := Commitment{}
require.NoError(t, json.Unmarshal(jsonBytes, &actual))
require.Equal(t, []byte(expected), []byte(actual))
}

File diff suppressed because it is too large

File diff suppressed because it is too large

863
crypto/core/curves/curve.go Executable file
View File

@ -0,0 +1,863 @@
//
// Copyright Coinbase, Inc. All Rights Reserved.
//
// SPDX-License-Identifier: Apache-2.0
//
package curves
import (
"crypto/elliptic"
"encoding/hex"
"encoding/json"
"fmt"
"hash"
"io"
"math/big"
"sync"
"github.com/onsonr/sonr/crypto/core/curves/native/bls12381"
)
var (
k256Initonce sync.Once
k256 Curve
bls12381g1Initonce sync.Once
bls12381g1 Curve
bls12381g2Initonce sync.Once
bls12381g2 Curve
bls12377g1Initonce sync.Once
bls12377g1 Curve
bls12377g2Initonce sync.Once
bls12377g2 Curve
p256Initonce sync.Once
p256 Curve
ed25519Initonce sync.Once
ed25519 Curve
pallasInitonce sync.Once
pallas Curve
)
const (
K256Name = "secp256k1"
BLS12381G1Name = "BLS12381G1"
BLS12381G2Name = "BLS12381G2"
BLS12831Name = "BLS12831"
P256Name = "P-256"
ED25519Name = "ed25519"
PallasName = "pallas"
BLS12377G1Name = "BLS12377G1"
BLS12377G2Name = "BLS12377G2"
BLS12377Name = "BLS12377"
)
const scalarBytes = 32
// Scalar represents an element of the scalar field \mathbb{F}_q
// of the elliptic curve construction.
type Scalar interface {
// Random returns a random scalar using the provided reader
// to retrieve bytes
Random(reader io.Reader) Scalar
// Hash the specific bytes in a manner to yield a
// uniformly distributed scalar
Hash(bytes []byte) Scalar
// Zero returns the additive identity element
Zero() Scalar
// One returns the multiplicative identity element
One() Scalar
// IsZero returns true if this element is the additive identity element
IsZero() bool
// IsOne returns true if this element is the multiplicative identity element
IsOne() bool
// IsOdd returns true if this element is odd
IsOdd() bool
// IsEven returns true if this element is even
IsEven() bool
// New returns an element with the value equal to `value`
New(value int) Scalar
// Cmp returns
// -2 if this element is in a different field than rhs
// -1 if this element is less than rhs
// 0 if this element is equal to rhs
// 1 if this element is greater than rhs
Cmp(rhs Scalar) int
// Square returns element*element
Square() Scalar
// Double returns element+element
Double() Scalar
// Invert returns element^-1 mod p
Invert() (Scalar, error)
// Sqrt computes the square root of this element if it exists.
Sqrt() (Scalar, error)
// Cube returns element*element*element
Cube() Scalar
// Add returns element+rhs
Add(rhs Scalar) Scalar
// Sub returns element-rhs
Sub(rhs Scalar) Scalar
// Mul returns element*rhs
Mul(rhs Scalar) Scalar
// MulAdd returns element * y + z mod p
MulAdd(y, z Scalar) Scalar
// Div returns element*rhs^-1 mod p
Div(rhs Scalar) Scalar
// Neg returns -element mod p
Neg() Scalar
// SetBigInt returns this element set to the value of v
SetBigInt(v *big.Int) (Scalar, error)
// BigInt returns this element as a big integer
BigInt() *big.Int
// Point returns the associated point for this scalar
Point() Point
// Bytes returns the canonical byte representation of this scalar
Bytes() []byte
// SetBytes creates a scalar from the canonical representation expecting the exact number of bytes needed to represent the scalar
SetBytes(bytes []byte) (Scalar, error)
// SetBytesWide creates a scalar expecting double the exact number of bytes needed to represent the scalar which is reduced by the modulus
SetBytesWide(bytes []byte) (Scalar, error)
// Clone returns a cloned Scalar of this value
Clone() Scalar
}
type PairingScalar interface {
Scalar
SetPoint(p Point) PairingScalar
}
func unmarshalScalar(input []byte) (*Curve, []byte, error) {
sep := byte(':')
i := 0
for ; i < len(input); i++ {
if input[i] == sep {
break
}
}
name := string(input[:i])
curve := GetCurveByName(name)
if curve == nil {
return nil, nil, fmt.Errorf("unrecognized curve")
}
return curve, input[i+1:], nil
}
func scalarMarshalBinary(scalar Scalar) ([]byte, error) {
// All scalars are 32 bytes long
// The last 32 bytes are the actual value
// The first remaining bytes are the curve name
// separated by a colon
name := []byte(scalar.Point().CurveName())
output := make([]byte, len(name)+1+scalarBytes)
copy(output[:len(name)], name)
output[len(name)] = byte(':')
copy(output[len(name)+1:], scalar.Bytes())
return output, nil
}
func scalarUnmarshalBinary(input []byte) (Scalar, error) {
// All scalars are 32 bytes long
// The curve name comes first, separated by a colon,
// followed by the 32-byte value
if len(input) < scalarBytes+1+len(P256Name) {
return nil, fmt.Errorf("invalid byte sequence")
}
sc, data, err := unmarshalScalar(input)
if err != nil {
return nil, err
}
return sc.Scalar.SetBytes(data)
}
func scalarMarshalText(scalar Scalar) ([]byte, error) {
// All scalars are 32 bytes long
// For text encoding we put the curve name first for readability
// separated by a colon, then the hex encoding of the scalar
// which avoids base64's strict/non-strict decoding ambiguity
name := []byte(scalar.Point().CurveName())
output := make([]byte, len(name)+1+scalarBytes*2)
copy(output[:len(name)], name)
output[len(name)] = byte(':')
_ = hex.Encode(output[len(name)+1:], scalar.Bytes())
return output, nil
}
func scalarUnmarshalText(input []byte) (Scalar, error) {
if len(input) < scalarBytes*2+len(P256Name)+1 {
return nil, fmt.Errorf("invalid byte sequence")
}
curve, data, err := unmarshalScalar(input)
if err != nil {
return nil, err
}
var t [scalarBytes]byte
_, err = hex.Decode(t[:], data)
if err != nil {
return nil, err
}
return curve.Scalar.SetBytes(t[:])
}
func scalarMarshalJson(scalar Scalar) ([]byte, error) {
m := make(map[string]string, 2)
m["type"] = scalar.Point().CurveName()
m["value"] = hex.EncodeToString(scalar.Bytes())
return json.Marshal(m)
}
func scalarUnmarshalJson(input []byte) (Scalar, error) {
var m map[string]string
err := json.Unmarshal(input, &m)
if err != nil {
return nil, err
}
curve := GetCurveByName(m["type"])
if curve == nil {
return nil, fmt.Errorf("invalid type")
}
s, err := hex.DecodeString(m["value"])
if err != nil {
return nil, err
}
S, err := curve.Scalar.SetBytes(s)
if err != nil {
return nil, err
}
return S, nil
}
// Point represents an elliptic curve point
type Point interface {
Random(reader io.Reader) Point
Hash(bytes []byte) Point
Identity() Point
Generator() Point
IsIdentity() bool
IsNegative() bool
IsOnCurve() bool
Double() Point
Scalar() Scalar
Neg() Point
Add(rhs Point) Point
Sub(rhs Point) Point
Mul(rhs Scalar) Point
Equal(rhs Point) bool
Set(x, y *big.Int) (Point, error)
ToAffineCompressed() []byte
ToAffineUncompressed() []byte
FromAffineCompressed(bytes []byte) (Point, error)
FromAffineUncompressed(bytes []byte) (Point, error)
CurveName() string
SumOfProducts(points []Point, scalars []Scalar) Point
}
type PairingPoint interface {
Point
OtherGroup() PairingPoint
Pairing(rhs PairingPoint) Scalar
MultiPairing(...PairingPoint) Scalar
}
func pointMarshalBinary(point Point) ([]byte, error) {
// Always stores points in compressed form
// The first bytes are the curve name
// separated by a colon followed by the compressed point
// bytes
t := point.ToAffineCompressed()
name := []byte(point.CurveName())
output := make([]byte, len(name)+1+len(t))
copy(output[:len(name)], name)
output[len(name)] = byte(':')
copy(output[len(output)-len(t):], t)
return output, nil
}
func pointUnmarshalBinary(input []byte) (Point, error) {
if len(input) < scalarBytes+1+len(P256Name) {
return nil, fmt.Errorf("invalid byte sequence")
}
sep := byte(':')
i := 0
for ; i < len(input); i++ {
if input[i] == sep {
break
}
}
name := string(input[:i])
curve := GetCurveByName(name)
if curve == nil {
return nil, fmt.Errorf("unrecognized curve")
}
return curve.Point.FromAffineCompressed(input[i+1:])
}
func pointMarshalText(point Point) ([]byte, error) {
// Always stores points in compressed form
// The first bytes are the curve name
// separated by a colon followed by the compressed point
// bytes
t := point.ToAffineCompressed()
name := []byte(point.CurveName())
output := make([]byte, len(name)+1+len(t)*2)
copy(output[:len(name)], name)
output[len(name)] = byte(':')
hex.Encode(output[len(output)-len(t)*2:], t)
return output, nil
}
func pointUnmarshalText(input []byte) (Point, error) {
if len(input) < scalarBytes*2+1+len(P256Name) {
return nil, fmt.Errorf("invalid byte sequence")
}
sep := byte(':')
i := 0
for ; i < len(input); i++ {
if input[i] == sep {
break
}
}
name := string(input[:i])
curve := GetCurveByName(name)
if curve == nil {
return nil, fmt.Errorf("unrecognized curve")
}
buffer := make([]byte, (len(input)-i)/2)
_, err := hex.Decode(buffer, input[i+1:])
if err != nil {
return nil, err
}
return curve.Point.FromAffineCompressed(buffer)
}
func pointMarshalJson(point Point) ([]byte, error) {
m := make(map[string]string, 2)
m["type"] = point.CurveName()
m["value"] = hex.EncodeToString(point.ToAffineCompressed())
return json.Marshal(m)
}
func pointUnmarshalJson(input []byte) (Point, error) {
var m map[string]string
err := json.Unmarshal(input, &m)
if err != nil {
return nil, err
}
curve := GetCurveByName(m["type"])
if curve == nil {
return nil, fmt.Errorf("invalid type")
}
p, err := hex.DecodeString(m["value"])
if err != nil {
return nil, err
}
P, err := curve.Point.FromAffineCompressed(p)
if err != nil {
return nil, err
}
return P, nil
}
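// Illustrative sketch (not part of the original file): round-tripping a point
// through the JSON helpers above. ED25519 is defined later in this file; a
// package-level forward reference is fine in Go.
func examplePointJsonRoundTrip() error {
	p := ED25519().Point.Generator()
	data, err := pointMarshalJson(p)
	if err != nil {
		return err
	}
	q, err := pointUnmarshalJson(data)
	if err != nil {
		return err
	}
	if !p.Equal(q) {
		return fmt.Errorf("point JSON round trip mismatch")
	}
	return nil
}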
// Curve represents a named elliptic curve with a scalar field and point group
type Curve struct {
Scalar Scalar
Point Point
Name string
}
func (c Curve) ScalarBaseMult(sc Scalar) Point {
return c.Point.Generator().Mul(sc)
}
func (c Curve) NewGeneratorPoint() Point {
return c.Point.Generator()
}
func (c Curve) NewIdentityPoint() Point {
return c.Point.Identity()
}
func (c Curve) NewScalar() Scalar {
return c.Scalar.Zero()
}
// ToEllipticCurve returns the equivalent of this curve as the go interface `elliptic.Curve`
func (c Curve) ToEllipticCurve() (elliptic.Curve, error) {
err := fmt.Errorf("can't convert %s", c.Name)
switch c.Name {
case K256Name:
return K256Curve(), nil
case BLS12381G1Name:
return nil, err
case BLS12381G2Name:
return nil, err
case BLS12831Name:
return nil, err
case P256Name:
return NistP256Curve(), nil
case ED25519Name:
return nil, err
case PallasName:
return nil, err
case BLS12377G1Name:
return nil, err
case BLS12377G2Name:
return nil, err
case BLS12377Name:
return nil, err
default:
return nil, err
}
}
// PairingCurve represents a named elliptic curve
// that supports pairings
type PairingCurve struct {
Scalar PairingScalar
PointG1 PairingPoint
PointG2 PairingPoint
GT Scalar
Name string
}
func (c PairingCurve) ScalarG1BaseMult(sc Scalar) PairingPoint {
return c.PointG1.Generator().Mul(sc).(PairingPoint)
}
func (c PairingCurve) ScalarG2BaseMult(sc Scalar) PairingPoint {
return c.PointG2.Generator().Mul(sc).(PairingPoint)
}
func (c PairingCurve) NewG1GeneratorPoint() PairingPoint {
return c.PointG1.Generator().(PairingPoint)
}
func (c PairingCurve) NewG2GeneratorPoint() PairingPoint {
return c.PointG2.Generator().(PairingPoint)
}
func (c PairingCurve) NewG1IdentityPoint() PairingPoint {
return c.PointG1.Identity().(PairingPoint)
}
func (c PairingCurve) NewG2IdentityPoint() PairingPoint {
return c.PointG2.Identity().(PairingPoint)
}
func (c PairingCurve) NewScalar() PairingScalar {
return c.Scalar.Zero().(PairingScalar)
}
// GetCurveByName returns the correct `Curve` given the name
func GetCurveByName(name string) *Curve {
switch name {
case K256Name:
return K256()
case BLS12381G1Name:
return BLS12381G1()
case BLS12381G2Name:
return BLS12381G2()
case BLS12831Name:
return BLS12381G1()
case P256Name:
return P256()
case ED25519Name:
return ED25519()
case PallasName:
return PALLAS()
case BLS12377G1Name:
return BLS12377G1()
case BLS12377G2Name:
return BLS12377G2()
case BLS12377Name:
return BLS12377G1()
default:
return nil
}
}
func GetPairingCurveByName(name string) *PairingCurve {
switch name {
case BLS12381G1Name:
return BLS12381(BLS12381G1().NewIdentityPoint())
case BLS12381G2Name:
return BLS12381(BLS12381G2().NewIdentityPoint())
case BLS12831Name:
return BLS12381(BLS12381G1().NewIdentityPoint())
default:
return nil
}
}
// BLS12381G1 returns the BLS12-381 curve with points in G1
func BLS12381G1() *Curve {
bls12381g1Initonce.Do(bls12381g1Init)
return &bls12381g1
}
func bls12381g1Init() {
bls12381g1 = Curve{
Scalar: &ScalarBls12381{
Value: bls12381.Bls12381FqNew(),
point: new(PointBls12381G1),
},
Point: new(PointBls12381G1).Identity(),
Name: BLS12381G1Name,
}
}
// BLS12381G2 returns the BLS12-381 curve with points in G2
func BLS12381G2() *Curve {
bls12381g2Initonce.Do(bls12381g2Init)
return &bls12381g2
}
func bls12381g2Init() {
bls12381g2 = Curve{
Scalar: &ScalarBls12381{
Value: bls12381.Bls12381FqNew(),
point: new(PointBls12381G2),
},
Point: new(PointBls12381G2).Identity(),
Name: BLS12381G2Name,
}
}
func BLS12381(preferredPoint Point) *PairingCurve {
return &PairingCurve{
Scalar: &ScalarBls12381{
Value: bls12381.Bls12381FqNew(),
point: preferredPoint,
},
PointG1: &PointBls12381G1{
Value: new(bls12381.G1).Identity(),
},
PointG2: &PointBls12381G2{
Value: new(bls12381.G2).Identity(),
},
GT: &ScalarBls12381Gt{
Value: new(bls12381.Gt).SetOne(),
},
Name: BLS12831Name,
}
}
// BLS12377G1 returns the BLS12-377 curve with points in G1
func BLS12377G1() *Curve {
bls12377g1Initonce.Do(bls12377g1Init)
return &bls12377g1
}
func bls12377g1Init() {
bls12377g1 = Curve{
Scalar: &ScalarBls12377{
value: new(big.Int),
point: new(PointBls12377G1),
},
Point: new(PointBls12377G1).Identity(),
Name: BLS12377G1Name,
}
}
// BLS12377G2 returns the BLS12-377 curve with points in G2
func BLS12377G2() *Curve {
bls12377g2Initonce.Do(bls12377g2Init)
return &bls12377g2
}
func bls12377g2Init() {
bls12377g2 = Curve{
Scalar: &ScalarBls12377{
value: new(big.Int),
point: new(PointBls12377G2),
},
Point: new(PointBls12377G2).Identity(),
Name: BLS12377G2Name,
}
}
// K256 returns the secp256k1 curve
func K256() *Curve {
k256Initonce.Do(k256Init)
return &k256
}
func k256Init() {
k256 = Curve{
Scalar: new(ScalarK256).Zero(),
Point: new(PointK256).Identity(),
Name: K256Name,
}
}
func P256() *Curve {
p256Initonce.Do(p256Init)
return &p256
}
func p256Init() {
p256 = Curve{
Scalar: new(ScalarP256).Zero(),
Point: new(PointP256).Identity(),
Name: P256Name,
}
}
func ED25519() *Curve {
ed25519Initonce.Do(ed25519Init)
return &ed25519
}
func ed25519Init() {
ed25519 = Curve{
Scalar: new(ScalarEd25519).Zero(),
Point: new(PointEd25519).Identity(),
Name: ED25519Name,
}
}
func PALLAS() *Curve {
pallasInitonce.Do(pallasInit)
return &pallas
}
func pallasInit() {
pallas = Curve{
Scalar: new(ScalarPallas).Zero(),
Point: new(PointPallas).Identity(),
Name: PallasName,
}
}
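// Illustrative sketch (not part of the original file): basic use of the Curve
// abstraction defined above, instantiated with ed25519. ScalarBaseMult and
// Generator().Mul compute the same point s*G.
func exampleCurveUsage(reader io.Reader) bool {
	curve := ED25519()
	s := curve.Scalar.Random(reader)
	P := curve.ScalarBaseMult(s)
	Q := curve.NewGeneratorPoint().Mul(s)
	return P.Equal(Q) // always true for the same s
}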
// https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-11#appendix-G.2.1
func osswu3mod4(u *big.Int, p *sswuParams) (x, y *big.Int) {
params := p.Params
field := NewField(p.Params.P)
tv1 := field.NewElement(u)
tv1 = tv1.Mul(tv1) // tv1 = u^2
tv3 := field.NewElement(p.Z).Mul(tv1) // tv3 = Z * tv1
tv2 := tv3.Mul(tv3) // tv2 = tv3^2
xd := tv2.Add(tv3) // xd = tv2 + tv3
x1n := xd.Add(field.One()) // x1n = (xd + 1)
x1n = x1n.Mul(field.NewElement(p.B)) // x1n * B
aNeg := field.NewElement(p.A).Neg()
xd = xd.Mul(aNeg) // xd = -A * xd
if xd.Value.Cmp(big.NewInt(0)) == 0 {
xd = field.NewElement(p.Z).Mul(field.NewElement(p.A)) // xd = Z * A
}
tv2 = xd.Mul(xd) // tv2 = xd^2
gxd := tv2.Mul(xd) // gxd = tv2 * xd
tv2 = tv2.Mul(field.NewElement(p.A)) // tv2 = A * tv2
gx1 := x1n.Mul(x1n) // gx1 = x1n^2
gx1 = gx1.Add(tv2) // gx1 = gx1 + tv2
gx1 = gx1.Mul(x1n) // gx1 = gx1 * x1n
tv2 = gxd.Mul(field.NewElement(p.B)) // tv2 = B * gxd
gx1 = gx1.Add(tv2) // gx1 = gx1 + tv2
tv4 := gxd.Mul(gxd) // tv4 = gxd^2
tv2 = gx1.Mul(gxd) // tv2 = gx1 * gxd
tv4 = tv4.Mul(tv2) // tv4 = tv4 * tv2
y1 := tv4.Pow(field.NewElement(p.C1))
y1 = y1.Mul(tv2) // y1 = y1 * tv2
x2n := tv3.Mul(x1n) // x2n = tv3 * x1n
y2 := y1.Mul(field.NewElement(p.C2)) // y2 = y1 * c2
y2 = y2.Mul(tv1) // y2 = y2 * tv1
y2 = y2.Mul(field.NewElement(u)) // y2 = y2 * u
tv2 = y1.Mul(y1) // tv2 = y1^2
tv2 = tv2.Mul(gxd) // tv2 = tv2 * gxd
e2 := tv2.Value.Cmp(gx1.Value) == 0
// If e2, x = x1, else x = x2
if e2 {
x = x1n.Value
} else {
x = x2n.Value
}
// xn / xd
x.Mul(x, new(big.Int).ModInverse(xd.Value, params.P))
x.Mod(x, params.P)
// If e2, y = y1, else y = y2
if e2 {
y = y1.Value
} else {
y = y2.Value
}
uBytes := u.Bytes()
yBytes := y.Bytes()
usign := uBytes[len(uBytes)-1] & 1
ysign := yBytes[len(yBytes)-1] & 1
// Fix sign of y
if usign != ysign {
y.Neg(y)
y.Mod(y, params.P)
}
return
}
func expandMsgXmd(h hash.Hash, msg, domain []byte, outLen int) ([]byte, error) {
if len(domain) > 255 {
return nil, fmt.Errorf("invalid domain length")
}
domainLen := uint8(len(domain))
// DST_prime = DST || I2OSP(len(DST), 1)
// b_0 = H(Z_pad || msg || l_i_b_str || I2OSP(0, 1) || DST_prime)
_, _ = h.Write(make([]byte, h.BlockSize()))
_, _ = h.Write(msg)
_, _ = h.Write([]byte{uint8(outLen >> 8), uint8(outLen)})
_, _ = h.Write([]byte{0})
_, _ = h.Write(domain)
_, _ = h.Write([]byte{domainLen})
b0 := h.Sum(nil)
// b_1 = H(b_0 || I2OSP(1, 1) || DST_prime)
h.Reset()
_, _ = h.Write(b0)
_, _ = h.Write([]byte{1})
_, _ = h.Write(domain)
_, _ = h.Write([]byte{domainLen})
b1 := h.Sum(nil)
// b_i = H(strxor(b_0, b_(i - 1)) || I2OSP(i, 1) || DST_prime)
ell := (outLen + h.Size() - 1) / h.Size()
bi := b1
out := make([]byte, outLen)
for i := 1; i < ell; i++ {
h.Reset()
// b_i = H(strxor(b_0, b_(i - 1)) || I2OSP(i, 1) || DST_prime)
tmp := make([]byte, h.Size())
for j := 0; j < h.Size(); j++ {
tmp[j] = b0[j] ^ bi[j]
}
_, _ = h.Write(tmp)
_, _ = h.Write([]byte{1 + uint8(i)})
_, _ = h.Write(domain)
_, _ = h.Write([]byte{domainLen})
// b_1 || ... || b_(ell - 1)
copy(out[(i-1)*h.Size():i*h.Size()], bi[:])
bi = h.Sum(nil)
}
// b_ell
copy(out[(ell-1)*h.Size():], bi[:])
return out[:outLen], nil
}
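// Illustrative sketch (not part of the original file): deriving 48 uniform
// bytes from a message and a domain-separation tag via expandMsgXmd, as the
// hash-to-curve routines do. Assumes "crypto/sha256" is imported; the DST
// string below is a hypothetical placeholder.
func exampleExpandMsgXmd(msg []byte) ([]byte, error) {
	return expandMsgXmd(sha256.New(), msg, []byte("EXAMPLE-DST-V01"), 48)
}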
func bhex(s string) *big.Int {
r, _ := new(big.Int).SetString(s, 16)
return r
}
type sswuParams struct {
Params *elliptic.CurveParams
C1, C2, A, B, Z *big.Int
}
// sumOfProductsPippenger implements a version of Pippenger's algorithm.
//
// The algorithm works as follows:
//
// Let `n` be a number of point-scalar pairs.
// Let `w` be a window of bits (6..8, chosen based on `n`, see cost factor).
//
// 1. Prepare `2^(w-1) - 1` buckets with indices `[1..2^(w-1))` initialized with identity points.
// Bucket 0 is not needed as it would contain points multiplied by 0.
// 2. Convert scalars to a radix-`2^w` representation with signed digits in `[-2^w/2, 2^w/2]`.
// Note: only the last digit may equal `2^w/2`.
// 3. Starting with the last window, for each point `i=[0..n)` add it to a bucket indexed by
// the point's scalar's value in the window.
// 4. Once all points in a window are sorted into buckets, add buckets by multiplying each
// by their index. Efficient way of doing it is to start with the last bucket and compute two sums:
// intermediate sum from the last to the first, and the full sum made of all intermediate sums.
// 5. Shift the resulting sum of buckets by `w` bits by using `w` doublings.
// 6. Add to the return value.
// 7. Repeat the loop.
//
// Approximate cost w/o wNAF optimizations (A = addition, D = doubling):
//
// ```ascii
// cost = (n*A + 2*(2^w/2)*A + w*D + A)*256/w
//
// | | | | |
// | | | | looping over 256/w windows
// | | | adding to the result
// sorting points | shifting the sum by w bits (to the next window, starting from last window)
// one by one |
// into buckets adding/subtracting all buckets
// multiplied by their indexes
// using a sum of intermediate sums
//
// ```
//
// For large `n`, dominant factor is (n*256/w) additions.
// However, if `w` is too big and `n` is not too big, then `(2^w/2)*A` could dominate.
// Therefore, the optimal choice of `w` grows slowly as `n` grows.
//
// # For constant time we use a fixed window of 6
//
// This algorithm is adapted from section 4 of <https://eprint.iacr.org/2012/549.pdf>.
// and https://cacr.uwaterloo.ca/techreports/2010/cacr2010-26.pdf
func sumOfProductsPippenger(points []Point, scalars []*big.Int) Point {
if len(points) != len(scalars) {
return nil
}
const w = 6
bucketSize := (1 << w) - 1
windows := make([]Point, 255/w+1)
for i := range windows {
windows[i] = points[0].Identity()
}
bucket := make([]Point, bucketSize)
for j := 0; j < len(windows); j++ {
for i := 0; i < bucketSize; i++ {
bucket[i] = points[0].Identity()
}
for i := 0; i < len(scalars); i++ {
index := bucketSize & int(new(big.Int).Rsh(scalars[i], uint(w*j)).Int64())
if index != 0 {
bucket[index-1] = bucket[index-1].Add(points[i])
}
}
acc, sum := windows[j].Identity(), windows[j].Identity()
for i := bucketSize - 1; i >= 0; i-- {
sum = sum.Add(bucket[i])
acc = acc.Add(sum)
}
windows[j] = acc
}
acc := windows[0].Identity()
for i := len(windows) - 1; i >= 0; i-- {
for j := 0; j < w; j++ {
acc = acc.Double()
}
acc = acc.Add(windows[i])
}
return acc
}
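// Illustrative sketch (not part of the original file): the naive reference
// that sumOfProductsPippenger must agree with, computing
// sum_i scalars[i]*points[i] one term at a time, for scalars already reduced
// below the group order.
func naiveSumOfProducts(points []Point, scalars []*big.Int) Point {
	if len(points) == 0 || len(points) != len(scalars) {
		return nil
	}
	acc := points[0].Identity()
	for i := range points {
		// Convert each big.Int through the point's scalar field, then accumulate.
		s, err := points[i].Scalar().SetBigInt(scalars[i])
		if err != nil {
			return nil
		}
		acc = acc.Add(points[i].Mul(s))
	}
	return acc
}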

251
crypto/core/curves/ec_point.go Executable file
View File

@ -0,0 +1,251 @@
//
// Copyright Coinbase, Inc. All Rights Reserved.
//
// SPDX-License-Identifier: Apache-2.0
//
package curves
import (
"crypto/elliptic"
"encoding/json"
"fmt"
"math/big"
"github.com/onsonr/sonr/crypto/core"
"github.com/dustinxie/ecc"
"github.com/onsonr/sonr/crypto/internal"
)
var curveNameToID = map[string]byte{
"secp256k1": 0,
"P-224": 1,
"P-256": 2,
"P-384": 3,
"P-521": 4,
}
var curveIDToName = map[byte]func() elliptic.Curve{
0: ecc.P256k1,
1: elliptic.P224,
2: elliptic.P256,
3: elliptic.P384,
4: elliptic.P521,
}
var curveMapper = map[string]func() elliptic.Curve{
"secp256k1": ecc.P256k1,
"P-224": elliptic.P224,
"P-256": elliptic.P256,
"P-384": elliptic.P384,
"P-521": elliptic.P521,
}
// EcPoint represents an elliptic curve Point
type EcPoint struct {
Curve elliptic.Curve
X, Y *big.Int
}
// EcPointJSON encapsulates the data that is serialized to JSON
// used internally and not for external use. Public so other pieces
// can use for serialization
type EcPointJSON struct {
X *big.Int `json:"x"`
Y *big.Int `json:"y"`
CurveName string `json:"curve_name"`
}
// MarshalJSON serializes EcPoint to JSON
func (a EcPoint) MarshalJSON() ([]byte, error) {
return json.Marshal(EcPointJSON{
CurveName: a.Curve.Params().Name,
X: a.X,
Y: a.Y,
})
}
// UnmarshalJSON deserializes JSON to EcPoint
func (a *EcPoint) UnmarshalJSON(bytes []byte) error {
data := new(EcPointJSON)
err := json.Unmarshal(bytes, data)
if err != nil {
return err
}
if mapper, ok := curveMapper[data.CurveName]; ok {
a.Curve = mapper()
a.X = data.X
a.Y = data.Y
return nil
}
return fmt.Errorf("unknown curve deserialized")
}
// MarshalBinary serializes EcPoint to binary
func (a *EcPoint) MarshalBinary() ([]byte, error) {
result := [65]byte{}
if code, ok := curveNameToID[a.Curve.Params().Name]; ok {
result[0] = code
a.X.FillBytes(result[1:33])
a.Y.FillBytes(result[33:65])
return result[:], nil
}
return nil, fmt.Errorf("unknown curve serialized")
}
// UnmarshalBinary deserializes binary to EcPoint
func (a *EcPoint) UnmarshalBinary(data []byte) error {
if mapper, ok := curveIDToName[data[0]]; ok {
a.Curve = mapper()
a.X = new(big.Int).SetBytes(data[1:33])
a.Y = new(big.Int).SetBytes(data[33:65])
return nil
}
return fmt.Errorf("unknown curve deserialized")
}
// IsValid checks if the point is valid
func (a EcPoint) IsValid() bool {
return a.IsOnCurve() || a.IsIdentity()
}
// IsOnCurve checks if the point is on the curve
func (a EcPoint) IsOnCurve() bool {
return a.Curve.IsOnCurve(a.X, a.Y)
}
// IsIdentity returns true if this Point is the Point at infinity
func (a EcPoint) IsIdentity() bool {
x := core.ConstantTimeEqByte(a.X, core.Zero)
y := core.ConstantTimeEqByte(a.Y, core.Zero)
return (x & y) == 1
}
// Equals return true if a and b have the same x,y coordinates
func (a EcPoint) Equals(b *EcPoint) bool {
if !sameCurve(&a, b) {
return false
}
x := core.ConstantTimeEqByte(a.X, b.X)
y := core.ConstantTimeEqByte(a.Y, b.Y)
return (x & y) == 1
}
// IsBasePoint returns true if this Point is curve's base Point
func (a EcPoint) IsBasePoint() bool {
p := a.Curve.Params()
x := core.ConstantTimeEqByte(a.X, p.Gx)
y := core.ConstantTimeEqByte(a.Y, p.Gy)
return (x & y) == 1
}
// reduceModN normalizes the Scalar to a positive element smaller than the base Point order.
func reduceModN(curve elliptic.Curve, k *big.Int) *big.Int {
return new(big.Int).Mod(k, curve.Params().N)
}
// Add performs elliptic curve addition on two points
func (a *EcPoint) Add(b *EcPoint) (*EcPoint, error) {
if a == nil || b == nil {
return nil, internal.ErrNilArguments
}
if !sameCurve(a, b) {
return nil, internal.ErrPointsDistinctCurves
}
p := &EcPoint{Curve: a.Curve}
p.X, p.Y = a.Curve.Add(a.X, a.Y, b.X, b.Y)
if !p.IsValid() {
return nil, internal.ErrNotOnCurve
}
return p, nil
}
// Neg returns the negation of a Weierstrass Point.
func (a *EcPoint) Neg() (*EcPoint, error) {
if a == nil {
return nil, internal.ErrNilArguments
}
p := &EcPoint{Curve: a.Curve, X: a.X, Y: new(big.Int).Sub(a.Curve.Params().P, a.Y)}
if !p.IsValid() {
return nil, internal.ErrNotOnCurve
}
return p, nil
}
// ScalarMult multiplies this Point by a Scalar
func (a *EcPoint) ScalarMult(k *big.Int) (*EcPoint, error) {
if a == nil || k == nil {
return nil, fmt.Errorf("cannot multiply nil Point or element")
}
n := reduceModN(a.Curve, k)
p := new(EcPoint)
p.Curve = a.Curve
p.X, p.Y = a.Curve.ScalarMult(a.X, a.Y, n.Bytes())
if !p.IsValid() {
return nil, fmt.Errorf("result not on the curve")
}
return p, nil
}
// NewScalarBaseMult creates a Point from the base Point multiplied by a field element
func NewScalarBaseMult(curve elliptic.Curve, k *big.Int) (*EcPoint, error) {
if curve == nil || k == nil {
return nil, fmt.Errorf("nil parameters are not supported")
}
n := reduceModN(curve, k)
p := new(EcPoint)
p.Curve = curve
p.X, p.Y = curve.ScalarBaseMult(n.Bytes())
if !p.IsValid() {
return nil, fmt.Errorf("result not on the curve")
}
return p, nil
}
// Bytes returns the bytes represented by this Point with x || y
func (a EcPoint) Bytes() []byte {
fieldSize := internal.CalcFieldSize(a.Curve)
out := make([]byte, fieldSize*2)
a.X.FillBytes(out[0:fieldSize])
a.Y.FillBytes(out[fieldSize : fieldSize*2])
return out
}
// PointFromBytesUncompressed outputs uncompressed X || Y similar to
// https://www.secg.org/sec1-v1.99.dif.pdf section 2.2 and 2.3
func PointFromBytesUncompressed(curve elliptic.Curve, b []byte) (*EcPoint, error) {
fieldSize := internal.CalcFieldSize(curve)
if len(b) != fieldSize*2 {
return nil, fmt.Errorf("invalid number of bytes")
}
p := &EcPoint{
Curve: curve,
X: new(big.Int).SetBytes(b[:fieldSize]),
Y: new(big.Int).SetBytes(b[fieldSize:]),
}
if !p.IsValid() {
return nil, fmt.Errorf("invalid Point")
}
return p, nil
}
// sameCurve determines if points a,b appear to be from the same curve
func sameCurve(a, b *EcPoint) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
aParams := a.Curve.Params()
bParams := b.Curve.Params()
return aParams.P.Cmp(bParams.P) == 0 &&
aParams.N.Cmp(bParams.N) == 0 &&
aParams.B.Cmp(bParams.B) == 0 &&
aParams.BitSize == bParams.BitSize &&
aParams.Gx.Cmp(bParams.Gx) == 0 &&
aParams.Gy.Cmp(bParams.Gy) == 0 &&
aParams.Name == bParams.Name
}

View File

@ -0,0 +1,369 @@
//
// Copyright Coinbase, Inc. All Rights Reserved.
//
// SPDX-License-Identifier: Apache-2.0
//
package curves
import (
"bytes"
"crypto/elliptic"
"math/big"
"testing"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/stretchr/testify/require"
"github.com/onsonr/sonr/crypto/core"
tt "github.com/onsonr/sonr/crypto/internal"
)
func TestIsIdentity(t *testing.T) {
// Should be Point at infinity
identity := &EcPoint{btcec.S256(), core.Zero, core.Zero}
require.True(t, identity.IsIdentity())
}
func TestNewScalarBaseMultZero(t *testing.T) {
// Should be Point at infinity
curve := btcec.S256()
num := big.NewInt(0)
p, err := NewScalarBaseMult(curve, num)
if err != nil {
t.Errorf("NewScalarBaseMult failed: %v", err)
}
if p == nil {
t.Errorf("NewScalarBaseMult failed when it should've succeeded.")
}
}
func TestNewScalarBaseMultOne(t *testing.T) {
// Should be base Point
curve := btcec.S256()
num := big.NewInt(1)
p, err := NewScalarBaseMult(curve, num)
if err != nil {
t.Errorf("NewScalarBaseMult failed: %v", err)
}
if p == nil {
t.Errorf("NewScalarBaseMult failed when it should've succeeded.")
t.FailNow()
}
if !bytes.Equal(p.Bytes(), append(curve.Gx.Bytes(), curve.Gy.Bytes()...)) {
t.Errorf("NewScalarBaseMult should've returned the base Point.")
}
}
func TestNewScalarBaseMultNeg(t *testing.T) {
curve := btcec.S256()
num := big.NewInt(-1)
p, err := NewScalarBaseMult(curve, num)
if err != nil {
t.Errorf("NewScalarBaseMult failed: %v", err)
}
if p == nil {
t.Errorf("NewScalarBaseMult failed when it should've succeeded.")
t.FailNow()
}
num.Mod(num, curve.N)
e, err := NewScalarBaseMult(curve, num)
if err != nil {
t.Errorf("NewScalarBaseMult failed: %v", err)
}
if e == nil {
t.Errorf("NewScalarBaseMult failed when it should've succeeded.")
t.FailNow()
}
if !bytes.Equal(p.Bytes(), e.Bytes()) {
t.Errorf("NewScalarBaseMult should've returned the %v, found: %v", e, p)
}
}
func TestScalarMultZero(t *testing.T) {
// Should be Point at infinity
curve := btcec.S256()
p := &EcPoint{
Curve: curve,
X: curve.Gx,
Y: curve.Gy,
}
num := big.NewInt(0)
q, err := p.ScalarMult(num)
if err != nil {
t.Errorf("ScalarMult failed: %v", err)
}
if q == nil {
t.Errorf("ScalarMult failed when it should've succeeded.")
t.FailNow()
}
if !q.IsIdentity() {
t.Errorf("ScalarMult should've returned the identity Point.")
}
}
func TestScalarMultOne(t *testing.T) {
// Should be base Point
curve := btcec.S256()
p := &EcPoint{
Curve: curve,
X: curve.Gx,
Y: curve.Gy,
}
num := big.NewInt(1)
q, err := p.ScalarMult(num)
if err != nil {
t.Errorf("ScalarMult failed: %v", err)
}
if q == nil {
t.Errorf("ScalarMult failed when it should've succeeded.")
t.FailNow()
}
if !bytes.Equal(q.Bytes(), append(curve.Gx.Bytes(), curve.Gy.Bytes()...)) {
t.Errorf("ScalarMult should've returned the base Point.")
}
}
func TestScalarMultNeg(t *testing.T) {
curve := btcec.S256()
p := &EcPoint{
Curve: curve,
X: curve.Gx,
Y: curve.Gy,
}
num := big.NewInt(-1)
q, err := p.ScalarMult(num)
if err != nil {
t.Errorf("ScalarMult failed: %v", err)
}
if q == nil {
t.Errorf("ScalarMult failed when it should've succeeded.")
}
num.Mod(num, curve.N)
e, err := p.ScalarMult(num)
if err != nil {
t.Errorf("ScalarMult failed: %v", err)
}
if e == nil {
t.Errorf("ScalarMult failed when it should've succeeded.")
t.FailNow()
}
if !bytes.Equal(q.Bytes(), e.Bytes()) {
t.Errorf("ScalarMult should've returned the %v, found: %v", e, p)
}
}
func TestEcPointAddSimple(t *testing.T) {
curve := btcec.S256()
num := big.NewInt(1)
p1, _ := NewScalarBaseMult(curve, num)
p2, _ := NewScalarBaseMult(curve, num)
p3, err := p1.Add(p2)
if err != nil {
t.Errorf("EcPoint.Add failed: %v", err)
}
num = big.NewInt(2)
ep, _ := NewScalarBaseMult(curve, num)
if !bytes.Equal(ep.Bytes(), p3.Bytes()) {
t.Errorf("EcPoint.Add failed: should equal %v, found: %v", ep, p3)
}
}
func TestEcPointAddCommutative(t *testing.T) {
curve := btcec.S256()
a, _ := core.Rand(curve.Params().N)
b, _ := core.Rand(curve.Params().N)
p1, _ := NewScalarBaseMult(curve, a)
p2, _ := NewScalarBaseMult(curve, b)
p3, err := p1.Add(p2)
if err != nil {
t.Errorf("EcPoint.Add failed: %v", err)
}
p4, err := p2.Add(p1)
if err != nil {
t.Errorf("EcPoint.Add failed: %v", err)
}
if !bytes.Equal(p3.Bytes(), p4.Bytes()) {
t.Errorf("EcPoint.Add Communicative not valid")
}
}
func TestEcPointAddNeg(t *testing.T) {
curve := btcec.S256()
num := big.NewInt(-1)
p1, _ := NewScalarBaseMult(curve, num)
num.Abs(num)
p2, _ := NewScalarBaseMult(curve, num)
p3, err := p1.Add(p2)
if err != nil {
t.Errorf("EcPoint.Add failed: %v", err)
}
zero := make([]byte, 64)
if !bytes.Equal(zero, p3.Bytes()) {
t.Errorf("Expected value to be zero, found: %v", p3)
}
}
func TestEcPointBytes(t *testing.T) {
curve := btcec.S256()
point, err := NewScalarBaseMult(curve, big.NewInt(2))
require.NoError(t, err)
data := point.Bytes()
point2, err := PointFromBytesUncompressed(curve, data)
require.NoError(t, err)
if point.X.Cmp(point2.X) != 0 && point.Y.Cmp(point2.Y) != 0 {
t.Errorf("Points are not equal. Expected %v, found %v", point, point2)
}
curve2 := elliptic.P224()
p2, err := NewScalarBaseMult(curve2, big.NewInt(2))
require.NoError(t, err)
dta := p2.Bytes()
point3, err := PointFromBytesUncompressed(curve2, dta)
require.NoError(t, err)
if p2.X.Cmp(point3.X) != 0 && p2.Y.Cmp(point3.Y) != 0 {
t.Errorf("Points are not equal. Expected %v, found %v", p2, point3)
}
curve3 := elliptic.P521()
p3, err := NewScalarBaseMult(curve3, big.NewInt(2))
require.NoError(t, err)
data = p3.Bytes()
point4, err := PointFromBytesUncompressed(curve3, data)
require.NoError(t, err)
if p3.X.Cmp(point4.X) != 0 && p3.Y.Cmp(point4.Y) != 0 {
t.Errorf("Points are not equal. Expected %v, found %v", p3, point4)
}
}
func TestEcPointBytesDifferentCurves(t *testing.T) {
k256 := btcec.S256()
p224 := elliptic.P224()
p256 := elliptic.P256()
kp, err := NewScalarBaseMult(k256, big.NewInt(1))
require.NoError(t, err)
data := kp.Bytes()
_, err = PointFromBytesUncompressed(p224, data)
require.Error(t, err)
_, err = PointFromBytesUncompressed(p256, data)
require.Error(t, err)
}
func TestEcPointBytesInvalidNumberBytes(t *testing.T) {
curve := btcec.S256()
for i := 1; i < 64; i++ {
data := make([]byte, i)
_, err := PointFromBytesUncompressed(curve, data)
require.Error(t, err)
}
for i := 65; i < 128; i++ {
data := make([]byte, i)
_, err := PointFromBytesUncompressed(curve, data)
require.Error(t, err)
}
}
func TestEcPointMultRandom(t *testing.T) {
curve := btcec.S256()
r, err := core.Rand(curve.N)
require.NoError(t, err)
pt, err := NewScalarBaseMult(curve, r)
require.NoError(t, err)
require.NotNil(t, pt)
data := pt.Bytes()
pt2, err := PointFromBytesUncompressed(curve, data)
require.NoError(t, err)
if pt.X.Cmp(pt2.X) != 0 || pt.Y.Cmp(pt2.Y) != 0 {
t.Errorf("Points are not equal. Expected: %v, found: %v", pt, pt2)
}
}
func TestIsBasePoint(t *testing.T) {
k256 := btcec.S256()
p224 := elliptic.P224()
p256 := elliptic.P256()
notG_p224, err := NewScalarBaseMult(p224, tt.B10("9876453120"))
require.NoError(t, err)
tests := []struct {
name string
curve elliptic.Curve
x, y *big.Int
expected bool
}{
{"k256-positive", k256, k256.Gx, k256.Gy, true},
{"p224-positive", p224, p224.Params().Gx, p224.Params().Gy, true},
{"p256-positive", p256, p256.Params().Gx, p256.Params().Gy, true},
{"p224-negative", p224, notG_p224.X, notG_p224.Y, false},
{"p256-negative-wrong-curve", p256, notG_p224.X, notG_p224.Y, false},
{"k256-negative-doubleGx", k256, k256.Gx, k256.Gx, false},
{"k256-negative-doubleGy", k256, k256.Gy, k256.Gy, false},
{"k256-negative-xy-swap", k256, k256.Gy, k256.Gx, false},
{"k256-negative-oh-oh", k256, core.Zero, core.Zero, false},
}
// Run all the tests!
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
actual := EcPoint{test.curve, test.x, test.y}.IsBasePoint()
require.Equal(t, test.expected, actual)
})
}
}
func TestEquals(t *testing.T) {
k256 := btcec.S256()
p224 := elliptic.P224()
p256 := elliptic.P256()
P_p224, _ := NewScalarBaseMult(p224, tt.B10("9876453120"))
P1_p224, _ := NewScalarBaseMult(p224, tt.B10("9876453120"))
P_k256 := &EcPoint{k256, P_p224.X, P_p224.Y}
id_p224 := &EcPoint{p224, core.Zero, core.Zero}
id_k256 := &EcPoint{k256, core.Zero, core.Zero}
id_p256 := &EcPoint{p256, core.Zero, core.Zero}
tests := []struct {
name string
x, y *EcPoint
expected bool
}{
{"p224 same pointer", P_p224, P_p224, true},
{"p224 same Point", P_p224, P1_p224, true},
{"p224 identity", id_p224, id_p224, true},
{"p256 identity", id_p256, id_p256, true},
{"k256 identity", id_k256, id_k256, true},
{"negative-same x different y", P_p224, &EcPoint{p224, P_p224.X, core.One}, false},
{"negative-same y different x", P_p224, &EcPoint{p224, core.Two, P_k256.Y}, false},
{"negative-wrong curve", P_p224, P_k256, false},
{"negative-wrong curve reversed", P_k256, P_p224, false},
{"Point is not the identity", P_p224, id_p224, false},
{"negative nil", P1_p224, nil, false},
{"identities on wrong curve", id_p256, id_k256, false},
}
// Run all the tests!
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
actual := test.x.Equals(test.y)
require.Equal(t, test.expected, actual)
})
}
}

351
crypto/core/curves/ec_scalar.go Executable file

@ -0,0 +1,351 @@
//
// Copyright Coinbase, Inc. All Rights Reserved.
//
// SPDX-License-Identifier: Apache-2.0
//
package curves
import (
"crypto/elliptic"
crand "crypto/rand"
"crypto/sha512"
"fmt"
"io"
"math/big"
"filippo.io/edwards25519"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/bwesterb/go-ristretto"
"github.com/onsonr/sonr/crypto/core"
"github.com/onsonr/sonr/crypto/core/curves/native/bls12381"
"github.com/onsonr/sonr/crypto/internal"
)
type EcScalar interface {
Add(x, y *big.Int) *big.Int
Sub(x, y *big.Int) *big.Int
Neg(x *big.Int) *big.Int
Mul(x, y *big.Int) *big.Int
Hash(input []byte) *big.Int
Div(x, y *big.Int) *big.Int
Random() (*big.Int, error)
IsValid(x *big.Int) bool
Bytes(x *big.Int) []byte // fixed-length byte array
}
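// Illustrative sketch (not part of the original file): exercising the EcScalar
// interface generically over any of the concrete curve implementations defined
// below. The helper name exampleScalarRoundTrip is hypothetical.
func exampleScalarRoundTrip(sc EcScalar) error {
a, err := sc.Random()
if err != nil {
return err
}
b, err := sc.Random()
if err != nil {
return err
}
sum := sc.Add(a, b)    // (a + b) mod the group order
diff := sc.Sub(sum, b) // should recover a
if diff.Cmp(a) != 0 {
return fmt.Errorf("add/sub round trip failed")
}
if !sc.IsValid(diff) {
return fmt.Errorf("scalar out of range")
}
return nil
}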
type K256Scalar struct{}
// Static interface assertion
var _ EcScalar = (*K256Scalar)(nil)
// Warning: big.Int's Mod uses the Euclidean algorithm, which is not constant time.
func NewK256Scalar() *K256Scalar {
return &K256Scalar{}
}
func (k K256Scalar) Add(x, y *big.Int) *big.Int {
v := new(big.Int).Add(x, y)
v.Mod(v, btcec.S256().N)
return v
}
func (k K256Scalar) Sub(x, y *big.Int) *big.Int {
v := new(big.Int).Sub(x, y)
v.Mod(v, btcec.S256().N)
return v
}
func (k K256Scalar) Neg(x *big.Int) *big.Int {
v := new(big.Int).Sub(btcec.S256().N, x)
v.Mod(v, btcec.S256().N)
return v
}
func (k K256Scalar) Mul(x, y *big.Int) *big.Int {
v := new(big.Int).Mul(x, y)
v.Mod(v, btcec.S256().N)
return v
}
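// Div returns x * y^-1 mod N. Note that big.Int.ModInverse returns nil when y
// has no inverse modulo N; the subsequent Mul would then panic, so callers must
// supply an invertible y.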
func (k K256Scalar) Div(x, y *big.Int) *big.Int {
t := new(big.Int).ModInverse(y, btcec.S256().N)
return k.Mul(x, t)
}
func (k K256Scalar) Hash(input []byte) *big.Int {
return new(ScalarK256).Hash(input).BigInt()
}
func (k K256Scalar) Random() (*big.Int, error) {
b := make([]byte, 48)
n, err := crand.Read(b)
if err != nil {
return nil, err
}
if n != 48 {
return nil, fmt.Errorf("insufficient bytes read")
}
v := new(big.Int).SetBytes(b)
v.Mod(v, btcec.S256().N)
return v, nil
}
func (k K256Scalar) IsValid(x *big.Int) bool {
return core.In(x, btcec.S256().N) == nil
}
func (k K256Scalar) Bytes(x *big.Int) []byte {
bytes := make([]byte, 32)
x.FillBytes(bytes) // big-endian; will left-pad.
return bytes
}
type P256Scalar struct{}
// Static interface assertion
var _ EcScalar = (*P256Scalar)(nil)
func NewP256Scalar() *P256Scalar {
return &P256Scalar{}
}
func (k P256Scalar) Add(x, y *big.Int) *big.Int {
v := new(big.Int).Add(x, y)
v.Mod(v, elliptic.P256().Params().N)
return v
}
func (k P256Scalar) Sub(x, y *big.Int) *big.Int {
v := new(big.Int).Sub(x, y)
v.Mod(v, elliptic.P256().Params().N)
return v
}
func (k P256Scalar) Neg(x *big.Int) *big.Int {
v := new(big.Int).Sub(elliptic.P256().Params().N, x)
v.Mod(v, elliptic.P256().Params().N)
return v
}
func (k P256Scalar) Mul(x, y *big.Int) *big.Int {
v := new(big.Int).Mul(x, y)
v.Mod(v, elliptic.P256().Params().N)
return v
}
func (k P256Scalar) Div(x, y *big.Int) *big.Int {
t := new(big.Int).ModInverse(y, elliptic.P256().Params().N)
return k.Mul(x, t)
}
func (k P256Scalar) Hash(input []byte) *big.Int {
return new(ScalarP256).Hash(input).BigInt()
}
func (k P256Scalar) Random() (*big.Int, error) {
b := make([]byte, 48)
n, err := crand.Read(b)
if err != nil {
return nil, err
}
if n != 48 {
return nil, fmt.Errorf("insufficient bytes read")
}
v := new(big.Int).SetBytes(b)
v.Mod(v, elliptic.P256().Params().N)
return v, nil
}
func (k P256Scalar) IsValid(x *big.Int) bool {
return core.In(x, elliptic.P256().Params().N) == nil
}
func (k P256Scalar) Bytes(x *big.Int) []byte {
bytes := make([]byte, 32)
x.FillBytes(bytes) // big-endian; will left-pad.
return bytes
}
type Bls12381Scalar struct{}
// Static interface assertion
var _ EcScalar = (*Bls12381Scalar)(nil)
func NewBls12381Scalar() *Bls12381Scalar {
return &Bls12381Scalar{}
}
func (k Bls12381Scalar) Add(x, y *big.Int) *big.Int {
a := bls12381.Bls12381FqNew().SetBigInt(x)
b := bls12381.Bls12381FqNew().SetBigInt(y)
return a.Add(a, b).BigInt()
}
func (k Bls12381Scalar) Sub(x, y *big.Int) *big.Int {
a := bls12381.Bls12381FqNew().SetBigInt(x)
b := bls12381.Bls12381FqNew().SetBigInt(y)
return a.Sub(a, b).BigInt()
}
func (k Bls12381Scalar) Neg(x *big.Int) *big.Int {
a := bls12381.Bls12381FqNew().SetBigInt(x)
return a.Neg(a).BigInt()
}
func (k Bls12381Scalar) Mul(x, y *big.Int) *big.Int {
a := bls12381.Bls12381FqNew().SetBigInt(x)
b := bls12381.Bls12381FqNew().SetBigInt(y)
return a.Mul(a, b).BigInt()
}
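// Div multiplies x by the inverse of y. The CMove below selects the quotient
// only when the inversion succeeded and returns x unchanged otherwise, so the
// selection avoids a data-dependent branch.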
func (k Bls12381Scalar) Div(x, y *big.Int) *big.Int {
c := bls12381.Bls12381FqNew()
a := bls12381.Bls12381FqNew().SetBigInt(x)
b := bls12381.Bls12381FqNew().SetBigInt(y)
_, wasInverted := c.Invert(b)
c.Mul(a, c)
tt := map[bool]int{false: 0, true: 1}
return a.CMove(a, c, tt[wasInverted]).BigInt()
}
func (k Bls12381Scalar) Hash(input []byte) *big.Int {
return new(ScalarBls12381).Hash(input).BigInt()
}
func (k Bls12381Scalar) Random() (*big.Int, error) {
a := BLS12381G1().NewScalar().Random(crand.Reader)
if a == nil {
return nil, fmt.Errorf("invalid random value")
}
return a.BigInt(), nil
}
func (k Bls12381Scalar) Bytes(x *big.Int) []byte {
bytes := make([]byte, 32)
x.FillBytes(bytes) // big-endian; will left-pad.
return bytes
}
func (k Bls12381Scalar) IsValid(x *big.Int) bool {
a := bls12381.Bls12381FqNew().SetBigInt(x)
return a.BigInt().Cmp(x) == 0
}
// taken from https://datatracker.ietf.org/doc/html/rfc8032
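// ed25519N is the prime order l of the Ed25519 base-point subgroup:
// l = 2^252 + 27742317777372353535851937790883648493.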
var ed25519N, _ = new(big.Int).SetString("1000000000000000000000000000000014DEF9DEA2F79CD65812631A5CF5D3ED", 16)
type Ed25519Scalar struct{}
// Static interface assertion
var _ EcScalar = (*Ed25519Scalar)(nil)
func NewEd25519Scalar() *Ed25519Scalar {
return &Ed25519Scalar{}
}
func (k Ed25519Scalar) Add(x, y *big.Int) *big.Int {
a, err := internal.BigInt2Ed25519Scalar(x)
if err != nil {
panic(err)
}
b, err := internal.BigInt2Ed25519Scalar(y)
if err != nil {
panic(err)
}
a.Add(a, b)
return new(big.Int).SetBytes(internal.ReverseScalarBytes(a.Bytes()))
}
func (k Ed25519Scalar) Sub(x, y *big.Int) *big.Int {
a, err := internal.BigInt2Ed25519Scalar(x)
if err != nil {
panic(err)
}
b, err := internal.BigInt2Ed25519Scalar(y)
if err != nil {
panic(err)
}
a.Subtract(a, b)
return new(big.Int).SetBytes(internal.ReverseScalarBytes(a.Bytes()))
}
func (k Ed25519Scalar) Neg(x *big.Int) *big.Int {
a, err := internal.BigInt2Ed25519Scalar(x)
if err != nil {
panic(err)
}
a.Negate(a)
return new(big.Int).SetBytes(internal.ReverseScalarBytes(a.Bytes()))
}
func (k Ed25519Scalar) Mul(x, y *big.Int) *big.Int {
a, err := internal.BigInt2Ed25519Scalar(x)
if err != nil {
panic(err)
}
b, err := internal.BigInt2Ed25519Scalar(y)
if err != nil {
panic(err)
}
a.Multiply(a, b)
return new(big.Int).SetBytes(internal.ReverseScalarBytes(a.Bytes()))
}
func (k Ed25519Scalar) Div(x, y *big.Int) *big.Int {
b, err := internal.BigInt2Ed25519Scalar(y)
if err != nil {
panic(err)
}
b.Invert(b)
a, err := internal.BigInt2Ed25519Scalar(x)
if err != nil {
panic(err)
}
a.Multiply(a, b)
return new(big.Int).SetBytes(internal.ReverseScalarBytes(a.Bytes()))
}
func (k Ed25519Scalar) Hash(input []byte) *big.Int {
v := new(ristretto.Scalar).Derive(input)
var data [32]byte
v.BytesInto(&data)
return new(big.Int).SetBytes(internal.ReverseScalarBytes(data[:]))
}
func (k Ed25519Scalar) Bytes(x *big.Int) []byte {
a, err := internal.BigInt2Ed25519Scalar(x)
if err != nil {
panic(err)
}
return internal.ReverseScalarBytes(a.Bytes())
}
func (k Ed25519Scalar) Random() (*big.Int, error) {
return k.RandomWithReader(crand.Reader)
}
func (k Ed25519Scalar) RandomWithReader(r io.Reader) (*big.Int, error) {
b := make([]byte, 64)
n, err := r.Read(b)
if err != nil {
return nil, err
}
if n != 64 {
return nil, fmt.Errorf("insufficient bytes read")
}
digest := sha512.Sum512(b)
var hBytes [32]byte
copy(hBytes[:], digest[:])
s, err := edwards25519.NewScalar().SetBytesWithClamping(hBytes[:])
if err != nil {
return nil, err
}
return new(big.Int).SetBytes(internal.ReverseScalarBytes(s.Bytes())), nil
}
func (k Ed25519Scalar) IsValid(x *big.Int) bool {
return x.Cmp(ed25519N) == -1
}

39
crypto/core/curves/ecdsa.go Executable file

@ -0,0 +1,39 @@
//
// Copyright Coinbase, Inc. All Rights Reserved.
//
// SPDX-License-Identifier: Apache-2.0
//
package curves
import (
"crypto/ecdsa"
"math/big"
)
// EcdsaVerify runs a curve- or algorithm-specific ECDSA verification function on
// an ECDSA public (verification) key, a message digest, and an ECDSA signature.
// It must return true if all the parameters are sane and the ECDSA signature is
// valid, and false otherwise.
type EcdsaVerify func(pubKey *EcPoint, hash []byte, signature *EcdsaSignature) bool
// EcdsaSignature represents a (composite) digital signature
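// R and S are the standard ECDSA signature components; V carries an optional
// recovery identifier, which VerifyEcdsa below ignores.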
type EcdsaSignature struct {
R *big.Int
S *big.Int
V int
}
// Static type assertion
var _ EcdsaVerify = VerifyEcdsa
// VerifyEcdsa verifies an ECDSA signature using core types.
func VerifyEcdsa(pk *EcPoint, hash []byte, sig *EcdsaSignature) bool {
return ecdsa.Verify(
&ecdsa.PublicKey{
Curve: pk.Curve,
X: pk.X,
Y: pk.Y,
},
hash, sig.R, sig.S)
}
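// Illustrative sketch (not part of the original file): pairing VerifyEcdsa with
// a standard-library signer. The variable names are hypothetical.
//
//	priv, _ := ecdsa.GenerateKey(btcec.S256(), rand.Reader)
//	digest := sha256.Sum256([]byte("message"))
//	r, s, _ := ecdsa.Sign(rand.Reader, priv, digest[:])
//	pk := &EcPoint{Curve: priv.Curve, X: priv.X, Y: priv.Y}
//	ok := VerifyEcdsa(pk, digest[:], &EcdsaSignature{R: r, S: s})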


@ -0,0 +1,788 @@
//
// Copyright Coinbase, Inc. All Rights Reserved.
//
// SPDX-License-Identifier: Apache-2.0
//
package curves
import (
"bytes"
"crypto/sha512"
"crypto/subtle"
"fmt"
"io"
"math/big"
"filippo.io/edwards25519"
"filippo.io/edwards25519/field"
"github.com/bwesterb/go-ristretto"
ed "github.com/bwesterb/go-ristretto/edwards25519"
"github.com/onsonr/sonr/crypto/internal"
)
type ScalarEd25519 struct {
value *edwards25519.Scalar
}
type PointEd25519 struct {
value *edwards25519.Point
}
var scOne, _ = edwards25519.NewScalar().SetCanonicalBytes([]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0})
func (s *ScalarEd25519) Random(reader io.Reader) Scalar {
if reader == nil {
return nil
}
var seed [64]byte
_, _ = reader.Read(seed[:])
return s.Hash(seed[:])
}
func (s *ScalarEd25519) Hash(bytes []byte) Scalar {
v := new(ristretto.Scalar).Derive(bytes)
var data [32]byte
v.BytesInto(&data)
value, err := edwards25519.NewScalar().SetCanonicalBytes(data[:])
if err != nil {
return nil
}
return &ScalarEd25519{value}
}
func (s *ScalarEd25519) Zero() Scalar {
return &ScalarEd25519{
value: edwards25519.NewScalar(),
}
}
func (s *ScalarEd25519) One() Scalar {
return &ScalarEd25519{
value: edwards25519.NewScalar().Set(scOne),
}
}
func (s *ScalarEd25519) IsZero() bool {
i := byte(0)
for _, b := range s.value.Bytes() {
i |= b
}
return i == 0
}
func (s *ScalarEd25519) IsOne() bool {
data := s.value.Bytes()
i := byte(0)
for j := 1; j < len(data); j++ {
i |= data[j]
}
return i == 0 && data[0] == 1
}
func (s *ScalarEd25519) IsOdd() bool {
return s.value.Bytes()[0]&1 == 1
}
func (s *ScalarEd25519) IsEven() bool {
return s.value.Bytes()[0]&1 == 0
}
func (s *ScalarEd25519) New(input int) Scalar {
var data [64]byte
i := input
if input < 0 {
i = -input
}
data[0] = byte(i)
data[1] = byte(i >> 8)
data[2] = byte(i >> 16)
data[3] = byte(i >> 24)
value, err := edwards25519.NewScalar().SetUniformBytes(data[:])
if err != nil {
return nil
}
if input < 0 {
value.Negate(value)
}
return &ScalarEd25519{
value,
}
}
func (s *ScalarEd25519) Cmp(rhs Scalar) int {
r := s.Sub(rhs)
if r != nil && r.IsZero() {
return 0
} else {
return -2
}
}
func (s *ScalarEd25519) Square() Scalar {
value := edwards25519.NewScalar().Multiply(s.value, s.value)
return &ScalarEd25519{value}
}
func (s *ScalarEd25519) Double() Scalar {
return &ScalarEd25519{
value: edwards25519.NewScalar().Add(s.value, s.value),
}
}
func (s *ScalarEd25519) Invert() (Scalar, error) {
return &ScalarEd25519{
value: edwards25519.NewScalar().Invert(s.value),
}, nil
}
func (s *ScalarEd25519) Sqrt() (Scalar, error) {
bi25519, _ := new(big.Int).SetString("1000000000000000000000000000000014DEF9DEA2F79CD65812631A5CF5D3ED", 16)
x := s.BigInt()
x.ModSqrt(x, bi25519)
return s.SetBigInt(x)
}
func (s *ScalarEd25519) Cube() Scalar {
value := edwards25519.NewScalar().Multiply(s.value, s.value)
value.Multiply(value, s.value)
return &ScalarEd25519{value}
}
func (s *ScalarEd25519) Add(rhs Scalar) Scalar {
r, ok := rhs.(*ScalarEd25519)
if ok {
return &ScalarEd25519{
value: edwards25519.NewScalar().Add(s.value, r.value),
}
} else {
return nil
}
}
func (s *ScalarEd25519) Sub(rhs Scalar) Scalar {
r, ok := rhs.(*ScalarEd25519)
if ok {
return &ScalarEd25519{
value: edwards25519.NewScalar().Subtract(s.value, r.value),
}
} else {
return nil
}
}
func (s *ScalarEd25519) Mul(rhs Scalar) Scalar {
r, ok := rhs.(*ScalarEd25519)
if ok {
return &ScalarEd25519{
value: edwards25519.NewScalar().Multiply(s.value, r.value),
}
} else {
return nil
}
}
func (s *ScalarEd25519) MulAdd(y, z Scalar) Scalar {
yy, ok := y.(*ScalarEd25519)
if !ok {
return nil
}
zz, ok := z.(*ScalarEd25519)
if !ok {
return nil
}
return &ScalarEd25519{value: edwards25519.NewScalar().MultiplyAdd(s.value, yy.value, zz.value)}
}
func (s *ScalarEd25519) Div(rhs Scalar) Scalar {
r, ok := rhs.(*ScalarEd25519)
if ok {
value := edwards25519.NewScalar().Invert(r.value)
value.Multiply(value, s.value)
return &ScalarEd25519{value}
} else {
return nil
}
}
func (s *ScalarEd25519) Neg() Scalar {
return &ScalarEd25519{
value: edwards25519.NewScalar().Negate(s.value),
}
}
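// SetBigInt reduces x modulo the group order and converts the big-endian
// big.Int representation into the 32-byte little-endian encoding that
// edwards25519 expects.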
func (s *ScalarEd25519) SetBigInt(x *big.Int) (Scalar, error) {
if x == nil {
return nil, fmt.Errorf("invalid value")
}
bi25519, _ := new(big.Int).SetString("1000000000000000000000000000000014DEF9DEA2F79CD65812631A5CF5D3ED", 16)
var v big.Int
buf := v.Mod(x, bi25519).Bytes()
var rBuf [32]byte
for i := 0; i < len(buf) && i < 32; i++ {
rBuf[i] = buf[len(buf)-i-1]
}
value, err := edwards25519.NewScalar().SetCanonicalBytes(rBuf[:])
if err != nil {
return nil, err
}
return &ScalarEd25519{value}, nil
}
func (s *ScalarEd25519) BigInt() *big.Int {
var ret big.Int
buf := internal.ReverseScalarBytes(s.value.Bytes())
return ret.SetBytes(buf)
}
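// Bytes returns the scalar's canonical 32-byte little-endian encoding (the
// native edwards25519 byte order), whereas BigInt above uses big-endian.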
func (s *ScalarEd25519) Bytes() []byte {
return s.value.Bytes()
}
// SetBytes takes a 32-byte array as input and returns an ed25519 scalar.
// The input must be exactly 32 bytes long and must be a canonical (reduced) encoding.
func (s *ScalarEd25519) SetBytes(input []byte) (Scalar, error) {
if len(input) != 32 {
return nil, fmt.Errorf("invalid byte sequence")
}
value, err := edwards25519.NewScalar().SetCanonicalBytes(input)
if err != nil {
return nil, err
}
return &ScalarEd25519{value}, nil
}
// SetBytesWide takes a 64-byte array as input, reduces it, and returns an ed25519 scalar.
// It uses SetUniformBytes of filippo.io/edwards25519 - https://github.com/FiloSottile/edwards25519/blob/v1.0.0-rc.1/scalar.go#L85
// If bytes is not of the right length, it returns nil and an error
func (s *ScalarEd25519) SetBytesWide(bytes []byte) (Scalar, error) {
value, err := edwards25519.NewScalar().SetUniformBytes(bytes)
if err != nil {
return nil, err
}
return &ScalarEd25519{value}, nil
}
// SetBytesClamping uses SetBytesWithClamping of filippo.io/edwards25519 - https://github.com/FiloSottile/edwards25519/blob/v1.0.0-rc.1/scalar.go#L135
// which applies the buffer pruning described in RFC 8032, Section 5.1.5 (also known as clamping)
// and sets bytes to the result. The input must be 32-byte long, and it is not modified.
// If bytes is not of the right length, SetBytesWithClamping returns nil and an error, and the receiver is unchanged.
func (s *ScalarEd25519) SetBytesClamping(bytes []byte) (Scalar, error) {
value, err := edwards25519.NewScalar().SetBytesWithClamping(bytes)
if err != nil {
return nil, err
}
return &ScalarEd25519{value}, nil
}
// SetBytesCanonical uses SetCanonicalBytes of filippo.io/edwards25519.
// https://github.com/FiloSottile/edwards25519/blob/v1.0.0-rc.1/scalar.go#L98
// This function takes an input x and sets s = x, where x is a 32-byte little-endian
// encoding of s, then it returns the corresponding ed25519 scalar. If the input is
// not a canonical encoding of s, it returns nil and an error.
func (s *ScalarEd25519) SetBytesCanonical(bytes []byte) (Scalar, error) {
return s.SetBytes(bytes)
}
func (s *ScalarEd25519) Point() Point {
return new(PointEd25519).Identity()
}
func (s *ScalarEd25519) Clone() Scalar {
return &ScalarEd25519{
value: edwards25519.NewScalar().Set(s.value),
}
}
func (s *ScalarEd25519) MarshalBinary() ([]byte, error) {
return scalarMarshalBinary(s)
}
func (s *ScalarEd25519) UnmarshalBinary(input []byte) error {
sc, err := scalarUnmarshalBinary(input)
if err != nil {
return err
}
ss, ok := sc.(*ScalarEd25519)
if !ok {
return fmt.Errorf("invalid scalar")
}
s.value = ss.value
return nil
}
func (s *ScalarEd25519) MarshalText() ([]byte, error) {
return scalarMarshalText(s)
}
func (s *ScalarEd25519) UnmarshalText(input []byte) error {
sc, err := scalarUnmarshalText(input)
if err != nil {
return err
}
ss, ok := sc.(*ScalarEd25519)
if !ok {
return fmt.Errorf("invalid scalar")
}
s.value = ss.value
return nil
}
func (s *ScalarEd25519) GetEdwardsScalar() *edwards25519.Scalar {
return edwards25519.NewScalar().Set(s.value)
}
func (s *ScalarEd25519) SetEdwardsScalar(sc *edwards25519.Scalar) *ScalarEd25519 {
return &ScalarEd25519{value: edwards25519.NewScalar().Set(sc)}
}
func (s *ScalarEd25519) MarshalJSON() ([]byte, error) {
return scalarMarshalJson(s)
}
func (s *ScalarEd25519) UnmarshalJSON(input []byte) error {
sc, err := scalarUnmarshalJson(input)
if err != nil {
return err
}
S, ok := sc.(*ScalarEd25519)
if !ok {
return fmt.Errorf("invalid type")
}
s.value = S.value
return nil
}
func (p *PointEd25519) Random(reader io.Reader) Point {
var seed [64]byte
_, _ = reader.Read(seed[:])
return p.Hash(seed[:])
}
func (p *PointEd25519) Hash(bytes []byte) Point {
// Perform hashing to the group using the Elligator2 map
//
// See https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-11#section-6.7.1
h := sha512.Sum512(bytes)
var res [32]byte
copy(res[:], h[:32])
signBit := (res[31] & 0x80) >> 7
fe := new(ed.FieldElement).SetBytes(&res).BytesInto(&res)
m1 := elligatorEncode(fe)
return toEdwards(m1, signBit)
}
func (p *PointEd25519) Identity() Point {
return &PointEd25519{
value: edwards25519.NewIdentityPoint(),
}
}
func (p *PointEd25519) Generator() Point {
return &PointEd25519{
value: edwards25519.NewGeneratorPoint(),
}
}
func (p *PointEd25519) IsIdentity() bool {
return p.Equal(p.Identity())
}
func (p *PointEd25519) IsNegative() bool {
// Negative points don't really exist in ed25519
return false
}
func (p *PointEd25519) IsOnCurve() bool {
_, err := edwards25519.NewIdentityPoint().SetBytes(p.ToAffineCompressed())
return err == nil
}
func (p *PointEd25519) Double() Point {
return &PointEd25519{value: edwards25519.NewIdentityPoint().Add(p.value, p.value)}
}
func (p *PointEd25519) Scalar() Scalar {
return new(ScalarEd25519).Zero()
}
func (p *PointEd25519) Neg() Point {
return &PointEd25519{value: edwards25519.NewIdentityPoint().Negate(p.value)}
}
func (p *PointEd25519) Add(rhs Point) Point {
if rhs == nil {
return nil
}
r, ok := rhs.(*PointEd25519)
if ok {
return &PointEd25519{value: edwards25519.NewIdentityPoint().Add(p.value, r.value)}
} else {
return nil
}
}
func (p *PointEd25519) Sub(rhs Point) Point {
if rhs == nil {
return nil
}
r, ok := rhs.(*PointEd25519)
if ok {
rTmp := edwards25519.NewIdentityPoint().Negate(r.value)
return &PointEd25519{value: edwards25519.NewIdentityPoint().Add(p.value, rTmp)}
} else {
return nil
}
}
func (p *PointEd25519) Mul(rhs Scalar) Point {
if rhs == nil {
return nil
}
r, ok := rhs.(*ScalarEd25519)
if ok {
value := edwards25519.NewIdentityPoint().ScalarMult(r.value, p.value)
return &PointEd25519{value}
} else {
return nil
}
}
// MangleScalarBitsAndMulByBasepointToProducePublicKey
// is a function for mangling the bits of a (formerly
// mathematically well-defined) "scalar" and multiplying it to produce a
// public key.
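// This mirrors RFC 8032 public-key derivation: the 32-byte scalar encoding is
// clamped (via SetBytesWithClamping) and then multiplied by the base point.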
func (p *PointEd25519) MangleScalarBitsAndMulByBasepointToProducePublicKey(rhs *ScalarEd25519) *PointEd25519 {
data := rhs.value.Bytes()
s, err := edwards25519.NewScalar().SetBytesWithClamping(data[:])
if err != nil {
return nil
}
value := edwards25519.NewIdentityPoint().ScalarBaseMult(s)
return &PointEd25519{value}
}
func (p *PointEd25519) Equal(rhs Point) bool {
r, ok := rhs.(*PointEd25519)
if ok {
// We would like to check that the point (X/Z, Y/Z) is equal to
// the point (X'/Z', Y'/Z') without converting into affine
// coordinates (x, y) and (x', y'), which requires two inversions.
// We have that X = xZ and X' = x'Z'. Thus, x = x' is equivalent to
// (xZ)Z' = (x'Z')Z, and similarly for the y-coordinate.
return p.value.Equal(r.value) == 1
//lhs1 := new(ed.FieldElement).Mul(&p.value.X, &r.value.Z)
//rhs1 := new(ed.FieldElement).Mul(&r.value.X, &p.value.Z)
//lhs2 := new(ed.FieldElement).Mul(&p.value.Y, &r.value.Z)
//rhs2 := new(ed.FieldElement).Mul(&r.value.Y, &p.value.Z)
//
//return lhs1.Equals(rhs1) && lhs2.Equals(rhs2)
} else {
return false
}
}
func (p *PointEd25519) Set(x, y *big.Int) (Point, error) {
// check is identity
xx := subtle.ConstantTimeCompare(x.Bytes(), []byte{})
yy := subtle.ConstantTimeCompare(y.Bytes(), []byte{})
if (xx | yy) == 1 {
return p.Identity(), nil
}
xElem := new(ed.FieldElement).SetBigInt(x)
yElem := new(ed.FieldElement).SetBigInt(y)
var data [32]byte
var affine [64]byte
xElem.BytesInto(&data)
copy(affine[:32], data[:])
yElem.BytesInto(&data)
copy(affine[32:], data[:])
return p.FromAffineUncompressed(affine[:])
}
// sqrtRatio sets r to the non-negative square root of the ratio of u and v.
//
// If u/v is square, sqrtRatio returns r and true. If u/v is not square,
// sqrtRatio sets r according to Section 4.3 of draft-irtf-cfrg-ristretto255-decaf448-00,
// and returns r and false.
func sqrtRatio(u, v *ed.FieldElement) (r *ed.FieldElement, wasSquare bool) {
sqrtM1 := ed.FieldElement{
533094393274173, 2016890930128738, 18285341111199,
134597186663265, 1486323764102114,
}
a := new(ed.FieldElement)
b := new(ed.FieldElement)
r = new(ed.FieldElement)
// r = (u * v3) * (u * v7)^((p-5)/8)
v2 := a.Square(v)
uv3 := b.Mul(u, b.Mul(v2, v))
uv7 := a.Mul(uv3, a.Square(v2))
r.Mul(uv3, r.Exp22523(uv7))
check := a.Mul(v, a.Square(r)) // check = v * r^2
uNeg := b.Neg(u)
correctSignSqrt := check.Equals(u)
flippedSignSqrt := check.Equals(uNeg)
flippedSignSqrtI := check.Equals(uNeg.Mul(uNeg, &sqrtM1))
rPrime := b.Mul(r, &sqrtM1) // r_prime = SQRT_M1 * r
// r = CT_SELECT(r_prime IF flipped_sign_sqrt | flipped_sign_sqrt_i ELSE r)
cselect(r, rPrime, r, flippedSignSqrt || flippedSignSqrtI)
r.Abs(r) // Choose the nonnegative square root.
return r, correctSignSqrt || flippedSignSqrt
}
// cselect sets v to a if cond is true, and to b if cond is false.
func cselect(v, a, b *ed.FieldElement, cond bool) *ed.FieldElement {
const mask64Bits uint64 = (1 << 64) - 1
m := uint64(0)
if cond {
m = mask64Bits
}
v[0] = (m & a[0]) | (^m & b[0])
v[1] = (m & a[1]) | (^m & b[1])
v[2] = (m & a[2]) | (^m & b[2])
v[3] = (m & a[3]) | (^m & b[3])
v[4] = (m & a[4]) | (^m & b[4])
return v
}
func (p *PointEd25519) ToAffineCompressed() []byte {
return p.value.Bytes()
}
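// ToAffineUncompressed converts from extended coordinates (X : Y : Z : T) back
// to affine form by multiplying X and Y with Z^-1, and returns x || y as 64
// little-endian bytes.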
func (p *PointEd25519) ToAffineUncompressed() []byte {
x, y, z, _ := p.value.ExtendedCoordinates()
recip := new(field.Element).Invert(z)
x.Multiply(x, recip)
y.Multiply(y, recip)
var out [64]byte
copy(out[:32], x.Bytes())
copy(out[32:], y.Bytes())
return out[:]
}
func (p *PointEd25519) FromAffineCompressed(inBytes []byte) (Point, error) {
pt, err := edwards25519.NewIdentityPoint().SetBytes(inBytes)
if err != nil {
return nil, err
}
return &PointEd25519{value: pt}, nil
}
func (p *PointEd25519) FromAffineUncompressed(inBytes []byte) (Point, error) {
if len(inBytes) != 64 {
return nil, fmt.Errorf("invalid byte sequence")
}
if bytes.Equal(inBytes, []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}) {
return &PointEd25519{value: edwards25519.NewIdentityPoint()}, nil
}
x, err := new(field.Element).SetBytes(inBytes[:32])
if err != nil {
return nil, err
}
y, err := new(field.Element).SetBytes(inBytes[32:])
if err != nil {
return nil, err
}
z := new(field.Element).One()
t := new(field.Element).Multiply(x, y)
value, err := edwards25519.NewIdentityPoint().SetExtendedCoordinates(x, y, z, t)
if err != nil {
return nil, err
}
return &PointEd25519{value}, nil
}
func (p *PointEd25519) CurveName() string {
return ED25519Name
}
func (p *PointEd25519) SumOfProducts(points []Point, scalars []Scalar) Point {
nScalars := make([]*edwards25519.Scalar, len(scalars))
nPoints := make([]*edwards25519.Point, len(points))
for i, sc := range scalars {
s, err := edwards25519.NewScalar().SetCanonicalBytes(sc.Bytes())
if err != nil {
return nil
}
nScalars[i] = s
}
for i, pt := range points {
pp, ok := pt.(*PointEd25519)
if !ok {
return nil
}
nPoints[i] = pp.value
}
pt := edwards25519.NewIdentityPoint().MultiScalarMult(nScalars, nPoints)
return &PointEd25519{value: pt}
}
func (p *PointEd25519) VarTimeDoubleScalarBaseMult(a Scalar, A Point, b Scalar) Point {
AA, ok := A.(*PointEd25519)
if !ok {
return nil
}
aa, ok := a.(*ScalarEd25519)
if !ok {
return nil
}
bb, ok := b.(*ScalarEd25519)
if !ok {
return nil
}
value := edwards25519.NewIdentityPoint().VarTimeDoubleScalarBaseMult(aa.value, AA.value, bb.value)
return &PointEd25519{value}
}
func (p *PointEd25519) MarshalBinary() ([]byte, error) {
return pointMarshalBinary(p)
}
func (p *PointEd25519) UnmarshalBinary(input []byte) error {
pt, err := pointUnmarshalBinary(input)
if err != nil {
return err
}
ppt, ok := pt.(*PointEd25519)
if !ok {
return fmt.Errorf("invalid point")
}
p.value = ppt.value
return nil
}
func (p *PointEd25519) MarshalText() ([]byte, error) {
return pointMarshalText(p)
}
func (p *PointEd25519) UnmarshalText(input []byte) error {
pt, err := pointUnmarshalText(input)
if err != nil {
return err
}
ppt, ok := pt.(*PointEd25519)
if !ok {
return fmt.Errorf("invalid point")
}
p.value = ppt.value
return nil
}
func (p *PointEd25519) MarshalJSON() ([]byte, error) {
return pointMarshalJson(p)
}
func (p *PointEd25519) UnmarshalJSON(input []byte) error {
pt, err := pointUnmarshalJson(input)
if err != nil {
return err
}
P, ok := pt.(*PointEd25519)
if !ok {
return fmt.Errorf("invalid type")
}
p.value = P.value
return nil
}
func (p *PointEd25519) GetEdwardsPoint() *edwards25519.Point {
return edwards25519.NewIdentityPoint().Set(p.value)
}
func (p *PointEd25519) SetEdwardsPoint(pt *edwards25519.Point) *PointEd25519 {
return &PointEd25519{value: edwards25519.NewIdentityPoint().Set(pt)}
}
// Attempt to convert to an `EdwardsPoint`, using the supplied
// choice of sign for the `EdwardsPoint`.
// - `sign`: a `u8` denoting the desired sign of the resulting
// `EdwardsPoint`. `0` denotes positive and `1` negative.
func toEdwards(u *ed.FieldElement, sign byte) *PointEd25519 {
one := new(ed.FieldElement).SetOne()
// To decompress the Montgomery u coordinate to an
// `EdwardsPoint`, we apply the birational map to obtain the
// Edwards y coordinate, then do Edwards decompression.
//
// The birational map is y = (u-1)/(u+1).
//
// The exceptional points are the zeros of the denominator,
// i.e., u = -1.
//
// But when u = -1, v^2 = u*(u^2+486662*u+1) = 486660.
//
// Since this is nonsquare mod p, u = -1 corresponds to a point
// on the twist, not the curve, so we can reject it early.
if u.Equals(new(ed.FieldElement).Neg(one)) {
return nil
}
// y = (u-1)/(u+1)
yLhs := new(ed.FieldElement).Sub(u, one)
yRhs := new(ed.FieldElement).Add(u, one)
yInv := new(ed.FieldElement).Inverse(yRhs)
y := new(ed.FieldElement).Mul(yLhs, yInv)
yBytes := y.Bytes()
yBytes[31] ^= sign << 7
pt, err := edwards25519.NewIdentityPoint().SetBytes(yBytes[:])
if err != nil {
return nil
}
pt.MultByCofactor(pt)
return &PointEd25519{value: pt}
}
// Perform the Elligator2 mapping to a Montgomery point encoded as a 32 byte value
//
// See <https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-11#section-6.7.1>
func elligatorEncode(r0 *ed.FieldElement) *ed.FieldElement {
montgomeryA := &ed.FieldElement{
486662, 0, 0, 0, 0,
}
// montgomeryANeg is equal to -486662.
montgomeryANeg := &ed.FieldElement{
2251799813198567,
2251799813685247,
2251799813685247,
2251799813685247,
2251799813685247,
}
t := new(ed.FieldElement)
one := new(ed.FieldElement).SetOne()
// d1 = 1 + 2r^2
d1 := new(ed.FieldElement).Add(one, t.DoubledSquare(r0))
// d = -A/(1+2r^2)
d := new(ed.FieldElement).Mul(montgomeryANeg, t.Inverse(d1))
dsq := new(ed.FieldElement).Square(d)
au := new(ed.FieldElement).Mul(montgomeryA, d)
inner := new(ed.FieldElement).Add(dsq, au)
inner.Add(inner, one)
// d^3 + Ad^2 + d
eps := new(ed.FieldElement).Mul(d, inner)
_, wasSquare := sqrtRatio(eps, one)
zero := new(ed.FieldElement).SetZero()
aTemp := new(ed.FieldElement).SetZero()
// 0 or A if non-square
cselect(aTemp, zero, montgomeryA, wasSquare)
// d, or d+A if non-square
u := new(ed.FieldElement).Add(d, aTemp)
// d or -d-A if non-square
cselect(u, u, new(ed.FieldElement).Neg(u), wasSquare)
return u
}
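// Illustrative sketch (not part of the original file): hashing arbitrary bytes
// to a point via the Elligator2 path exposed by PointEd25519.Hash.
//
//	pt := new(PointEd25519).Hash([]byte("example input"))
//	enc := pt.ToAffineCompressed() // 32-byte canonical encoding
//	rt, err := new(PointEd25519).FromAffineCompressed(enc)
//	// err == nil and rt.Equal(pt) are expected to hold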


@ -0,0 +1,403 @@
//
// Copyright Coinbase, Inc. All Rights Reserved.
//
// SPDX-License-Identifier: Apache-2.0
//
package curves
import (
crand "crypto/rand"
"encoding/hex"
"math/big"
"testing"
ed "filippo.io/edwards25519"
"github.com/stretchr/testify/require"
"github.com/onsonr/sonr/crypto/internal"
)
func TestScalarEd25519Random(t *testing.T) {
ed25519 := ED25519()
sc := ed25519.Scalar.Random(testRng())
s, ok := sc.(*ScalarEd25519)
require.True(t, ok)
expected := toRSc("feaa6a9d6dda758da6145f7d411a3af9f8a120698e0093faa97085b384c3f00e")
require.Equal(t, s.value.Equal(expected), 1)
// Try 10 random values
for i := 0; i < 10; i++ {
sc := ed25519.Scalar.Random(crand.Reader)
_, ok := sc.(*ScalarEd25519)
require.True(t, ok)
require.True(t, !sc.IsZero())
}
}
func TestScalarEd25519Hash(t *testing.T) {
var b [32]byte
ed25519 := ED25519()
sc := ed25519.Scalar.Hash(b[:])
s, ok := sc.(*ScalarEd25519)
require.True(t, ok)
expected := toRSc("9d574494a02d72f5ff311cf0fb844d0fdd6103b17255274e029bdeed7207d409")
require.Equal(t, s.value.Equal(expected), 1)
}
func TestScalarEd25519Zero(t *testing.T) {
ed25519 := ED25519()
sc := ed25519.Scalar.Zero()
require.True(t, sc.IsZero())
require.True(t, sc.IsEven())
}
func TestScalarEd25519One(t *testing.T) {
ed25519 := ED25519()
sc := ed25519.Scalar.One()
require.True(t, sc.IsOne())
require.True(t, sc.IsOdd())
}
func TestScalarEd25519New(t *testing.T) {
ed25519 := ED25519()
three := ed25519.Scalar.New(3)
require.True(t, three.IsOdd())
four := ed25519.Scalar.New(4)
require.True(t, four.IsEven())
neg1 := ed25519.Scalar.New(-1)
require.True(t, neg1.IsEven())
neg2 := ed25519.Scalar.New(-2)
require.True(t, neg2.IsOdd())
}
func TestScalarEd25519Square(t *testing.T) {
ed25519 := ED25519()
three := ed25519.Scalar.New(3)
nine := ed25519.Scalar.New(9)
require.Equal(t, three.Square().Cmp(nine), 0)
}
func TestScalarEd25519Cube(t *testing.T) {
ed25519 := ED25519()
three := ed25519.Scalar.New(3)
twentySeven := ed25519.Scalar.New(27)
require.Equal(t, three.Cube().Cmp(twentySeven), 0)
}
func TestScalarEd25519Double(t *testing.T) {
ed25519 := ED25519()
three := ed25519.Scalar.New(3)
six := ed25519.Scalar.New(6)
require.Equal(t, three.Double().Cmp(six), 0)
}
func TestScalarEd25519Neg(t *testing.T) {
ed25519 := ED25519()
one := ed25519.Scalar.One()
neg1 := ed25519.Scalar.New(-1)
require.Equal(t, one.Neg().Cmp(neg1), 0)
lotsOfThrees := ed25519.Scalar.New(333333)
expected := ed25519.Scalar.New(-333333)
require.Equal(t, lotsOfThrees.Neg().Cmp(expected), 0)
}
func TestScalarEd25519Invert(t *testing.T) {
ed25519 := ED25519()
nine := ed25519.Scalar.New(9)
actual, _ := nine.Invert()
sa, _ := actual.(*ScalarEd25519)
expected := toRSc("c3d9c4db0516043013b1e1ce8637dc92e3388ee3388ee3388ee3388ee3388e03")
require.Equal(t, sa.value.Equal(expected), 1)
}
func TestScalarEd25519Sqrt(t *testing.T) {
ed25519 := ED25519()
nine := ed25519.Scalar.New(9)
actual, err := nine.Sqrt()
sa, _ := actual.(*ScalarEd25519)
expected := toRSc("03")
require.NoError(t, err)
require.Equal(t, sa.value.Equal(expected), 1)
}
func TestScalarEd25519Add(t *testing.T) {
ed25519 := ED25519()
nine := ed25519.Scalar.New(9)
six := ed25519.Scalar.New(6)
fifteen := nine.Add(six)
require.NotNil(t, fifteen)
expected := ed25519.Scalar.New(15)
require.Equal(t, expected.Cmp(fifteen), 0)
upper := ed25519.Scalar.New(-3)
actual := upper.Add(nine)
require.NotNil(t, actual)
require.Equal(t, actual.Cmp(six), 0)
}
func TestScalarEd25519Sub(t *testing.T) {
ed25519 := ED25519()
nine := ed25519.Scalar.New(9)
six := ed25519.Scalar.New(6)
expected := ed25519.Scalar.New(-3)
actual := six.Sub(nine)
require.Equal(t, expected.Cmp(actual), 0)
actual = nine.Sub(six)
require.Equal(t, actual.Cmp(ed25519.Scalar.New(3)), 0)
}
func TestScalarEd25519Mul(t *testing.T) {
ed25519 := ED25519()
nine := ed25519.Scalar.New(9)
six := ed25519.Scalar.New(6)
actual := nine.Mul(six)
require.Equal(t, actual.Cmp(ed25519.Scalar.New(54)), 0)
upper := ed25519.Scalar.New(-1)
require.Equal(t, upper.Mul(upper).Cmp(ed25519.Scalar.New(1)), 0)
}
func TestScalarEd25519Div(t *testing.T) {
ed25519 := ED25519()
nine := ed25519.Scalar.New(9)
actual := nine.Div(nine)
require.Equal(t, actual.Cmp(ed25519.Scalar.New(1)), 0)
require.Equal(t, ed25519.Scalar.New(54).Div(nine).Cmp(ed25519.Scalar.New(6)), 0)
}
func TestScalarEd25519Serialize(t *testing.T) {
ed25519 := ED25519()
sc := ed25519.Scalar.New(255)
sequence := sc.Bytes()
require.Equal(t, len(sequence), 32)
require.Equal(t, sequence, []byte{0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0})
ret, err := ed25519.Scalar.SetBytes(sequence)
require.NoError(t, err)
require.Equal(t, ret.Cmp(sc), 0)
// Try 10 random values
for i := 0; i < 10; i++ {
sc = ed25519.Scalar.Random(crand.Reader)
sequence = sc.Bytes()
require.Equal(t, len(sequence), 32)
ret, err = ed25519.Scalar.SetBytes(sequence)
require.NoError(t, err)
require.Equal(t, ret.Cmp(sc), 0)
}
}
func TestScalarEd25519Nil(t *testing.T) {
ed25519 := ED25519()
one := ed25519.Scalar.New(1)
require.Nil(t, one.Add(nil))
require.Nil(t, one.Sub(nil))
require.Nil(t, one.Mul(nil))
require.Nil(t, one.Div(nil))
require.Nil(t, ed25519.Scalar.Random(nil))
require.Equal(t, one.Cmp(nil), -2)
_, err := ed25519.Scalar.SetBigInt(nil)
require.Error(t, err)
}
func TestPointEd25519Random(t *testing.T) {
ed25519 := ED25519()
sc := ed25519.Point.Random(testRng())
s, ok := sc.(*PointEd25519)
require.True(t, ok)
expected := toRPt("6011540c6231421a70ced5f577432531f198d318facfaad6e52cc42fba6e6fc5")
require.True(t, s.Equal(&PointEd25519{expected}))
// Try 25 random values
for i := 0; i < 25; i++ {
sc := ed25519.Point.Random(crand.Reader)
_, ok := sc.(*PointEd25519)
require.True(t, ok)
require.True(t, !sc.IsIdentity())
pBytes := sc.ToAffineCompressed()
_, err := ed.NewIdentityPoint().SetBytes(pBytes)
require.NoError(t, err)
}
}
func TestPointEd25519Hash(t *testing.T) {
var b [32]byte
ed25519 := ED25519()
sc := ed25519.Point.Hash(b[:])
s, ok := sc.(*PointEd25519)
require.True(t, ok)
expected := toRPt("b4d75c3bb03ca644ab6c6d2a955c911003d8cfa719415de93a6b85eeb0c8dd97")
require.True(t, s.Equal(&PointEd25519{expected}))
// Fuzz test
for i := 0; i < 25; i++ {
_, _ = crand.Read(b[:])
sc = ed25519.Point.Hash(b[:])
require.NotNil(t, sc)
}
}
func TestPointEd25519Identity(t *testing.T) {
ed25519 := ED25519()
sc := ed25519.Point.Identity()
require.True(t, sc.IsIdentity())
require.Equal(t, sc.ToAffineCompressed(), []byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0})
}
func TestPointEd25519Generator(t *testing.T) {
ed25519 := ED25519()
sc := ed25519.Point.Generator()
s, ok := sc.(*PointEd25519)
require.True(t, ok)
require.Equal(t, s.ToAffineCompressed(), []byte{0x58, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66})
}
func TestPointEd25519Set(t *testing.T) {
ed25519 := ED25519()
iden, err := ed25519.Point.Set(big.NewInt(0), big.NewInt(0))
require.NoError(t, err)
require.True(t, iden.IsIdentity())
xBytes, _ := hex.DecodeString("1ad5258f602d56c9b2a7259560c72c695cdcd6fd31e2a4c0fe536ecdd3366921")
yBytes, _ := hex.DecodeString("5866666666666666666666666666666666666666666666666666666666666666")
x := new(big.Int).SetBytes(internal.ReverseScalarBytes(xBytes))
y := new(big.Int).SetBytes(internal.ReverseScalarBytes(yBytes))
newPoint, err := ed25519.Point.Set(x, y)
require.NoError(t, err)
require.NotEqualf(t, iden, newPoint, "after setting valid x and y, the point should NOT be identity point")
emptyX := new(big.Int).SetBytes(internal.ReverseScalarBytes([]byte{}))
identityPoint, err := ed25519.Point.Set(emptyX, y)
require.NoError(t, err)
require.Equalf(t, iden, identityPoint, "When x is empty, the point will be identity")
}
func TestPointEd25519Double(t *testing.T) {
ed25519 := ED25519()
g := ed25519.Point.Generator()
g2 := g.Double()
require.True(t, g2.Equal(g.Mul(ed25519.Scalar.New(2))))
i := ed25519.Point.Identity()
require.True(t, i.Double().Equal(i))
}
func TestPointEd25519Neg(t *testing.T) {
ed25519 := ED25519()
g := ed25519.Point.Generator().Neg()
require.True(t, g.Neg().Equal(ed25519.Point.Generator()))
require.True(t, ed25519.Point.Identity().Neg().Equal(ed25519.Point.Identity()))
}
func TestPointEd25519Add(t *testing.T) {
ed25519 := ED25519()
pt := ed25519.Point.Generator()
require.True(t, pt.Add(pt).Equal(pt.Double()))
require.True(t, pt.Mul(ed25519.Scalar.New(3)).Equal(pt.Add(pt).Add(pt)))
}
func TestPointEd25519Sub(t *testing.T) {
ed25519 := ED25519()
g := ed25519.Point.Generator()
pt := ed25519.Point.Generator().Mul(ed25519.Scalar.New(4))
require.True(t, pt.Sub(g).Sub(g).Sub(g).Equal(g))
require.True(t, pt.Sub(g).Sub(g).Sub(g).Sub(g).IsIdentity())
}
func TestPointEd25519Mul(t *testing.T) {
ed25519 := ED25519()
g := ed25519.Point.Generator()
pt := ed25519.Point.Generator().Mul(ed25519.Scalar.New(4))
require.True(t, g.Double().Double().Equal(pt))
}
func TestPointEd25519Serialize(t *testing.T) {
ed25519 := ED25519()
ss := ed25519.Scalar.Random(testRng())
g := ed25519.Point.Generator()
ppt := g.Mul(ss)
expectedC := []byte{0x7f, 0x5b, 0xa, 0xd9, 0xb8, 0xce, 0xb7, 0x7, 0x4c, 0x10, 0xc8, 0xb4, 0x27, 0xe8, 0xd2, 0x28, 0x50, 0x42, 0x6c, 0x0, 0x8a, 0x3, 0x72, 0x2b, 0x7c, 0x3c, 0x37, 0x6f, 0xf8, 0x8f, 0x42, 0x5d}
expectedU := []byte{0x70, 0xad, 0x4, 0xa1, 0x6, 0x8, 0x9f, 0x47, 0xe1, 0xe8, 0x9b, 0x9c, 0x81, 0x5a, 0xfb, 0xb9, 0x85, 0x6a, 0x2c, 0xa, 0xbc, 0xff, 0xe, 0xc6, 0xa0, 0xb0, 0xac, 0x75, 0xc, 0xd8, 0x59, 0x53, 0x7f, 0x5b, 0xa, 0xd9, 0xb8, 0xce, 0xb7, 0x7, 0x4c, 0x10, 0xc8, 0xb4, 0x27, 0xe8, 0xd2, 0x28, 0x50, 0x42, 0x6c, 0x0, 0x8a, 0x3, 0x72, 0x2b, 0x7c, 0x3c, 0x37, 0x6f, 0xf8, 0x8f, 0x42, 0x5d}
require.Equal(t, ppt.ToAffineCompressed(), expectedC)
require.Equal(t, ppt.ToAffineUncompressed(), expectedU)
retP, err := ppt.FromAffineCompressed(ppt.ToAffineCompressed())
require.NoError(t, err)
require.True(t, ppt.Equal(retP))
retP, err = ppt.FromAffineUncompressed(ppt.ToAffineUncompressed())
require.NoError(t, err)
require.True(t, ppt.Equal(retP))
// smoke test
for i := 0; i < 25; i++ {
s := ed25519.Scalar.Random(crand.Reader)
pt := g.Mul(s)
cmprs := pt.ToAffineCompressed()
require.Equal(t, len(cmprs), 32)
retC, err := pt.FromAffineCompressed(cmprs)
require.NoError(t, err)
require.True(t, pt.Equal(retC))
un := pt.ToAffineUncompressed()
require.Equal(t, len(un), 64)
retU, err := pt.FromAffineUncompressed(un)
require.NoError(t, err)
require.True(t, pt.Equal(retU))
}
}
func TestPointEd25519Nil(t *testing.T) {
ed25519 := ED25519()
one := ed25519.Point.Generator()
require.Nil(t, one.Add(nil))
require.Nil(t, one.Sub(nil))
require.Nil(t, one.Mul(nil))
require.Nil(t, ed25519.Scalar.Random(nil))
require.False(t, one.Equal(nil))
_, err := ed25519.Scalar.SetBigInt(nil)
require.Error(t, err)
}
func TestPointEd25519SumOfProducts(t *testing.T) {
lhs := new(PointEd25519).Generator().Mul(new(ScalarEd25519).New(50))
points := make([]Point, 5)
for i := range points {
points[i] = new(PointEd25519).Generator()
}
scalars := []Scalar{
new(ScalarEd25519).New(8),
new(ScalarEd25519).New(9),
new(ScalarEd25519).New(10),
new(ScalarEd25519).New(11),
new(ScalarEd25519).New(12),
}
rhs := lhs.SumOfProducts(points, scalars)
require.NotNil(t, rhs)
require.True(t, lhs.Equal(rhs))
}
func TestPointEd25519VarTimeDoubleScalarBaseMult(t *testing.T) {
curve := ED25519()
h := curve.Point.Hash([]byte("TestPointEd25519VarTimeDoubleScalarBaseMult"))
a := curve.Scalar.New(23)
b := curve.Scalar.New(77)
H, ok := h.(*PointEd25519)
require.True(t, ok)
rhs := H.VarTimeDoubleScalarBaseMult(a, H, b)
lhs := h.Mul(a).Add(curve.Point.Generator().Mul(b))
require.True(t, lhs.Equal(rhs))
}
func toRSc(hx string) *ed.Scalar {
e, _ := hex.DecodeString(hx)
var data [32]byte
copy(data[:], e)
value, _ := new(ed.Scalar).SetCanonicalBytes(data[:])
return value
}
func toRPt(hx string) *ed.Point {
e, _ := hex.DecodeString(hx)
var data [32]byte
copy(data[:], e)
pt, _ := new(PointEd25519).FromAffineCompressed(data[:])
return pt.(*PointEd25519).value
}

280
crypto/core/curves/field.go Executable file

@ -0,0 +1,280 @@
//
// Copyright Coinbase, Inc. All Rights Reserved.
//
// SPDX-License-Identifier: Apache-2.0
//
// Package curves: Field implementation IS NOT constant time as it leverages math/big for big number operations.
package curves
import (
"crypto/rand"
"encoding/json"
"fmt"
"io"
"math/big"
"sync"
)
var ed25519SubGroupOrderOnce sync.Once
var ed25519SubGroupOrder *big.Int
// Field is a finite field.
type Field struct {
*big.Int
}
// Element is a group element within a finite field.
type Element struct {
Modulus *Field `json:"modulus"`
Value *big.Int `json:"value"`
}
// ElementJSON is used in JSON<>Element conversions.
// For years, big.Int hasn't properly supported JSON unmarshaling
// https://github.com/golang/go/issues/28154
type ElementJSON struct {
Modulus string `json:"modulus"`
Value string `json:"value"`
}
// Marshal Element to JSON
func (x *Element) MarshalJSON() ([]byte, error) {
return json.Marshal(ElementJSON{
Modulus: x.Modulus.String(),
Value: x.Value.String(),
})
}
func (x *Element) UnmarshalJSON(bytes []byte) error {
var e ElementJSON
err := json.Unmarshal(bytes, &e)
if err != nil {
return err
}
// Convert the strings to big.Ints
modulus, ok := new(big.Int).SetString(e.Modulus, 10)
if !ok {
return fmt.Errorf("failed to unmarshal modulus string '%v' to big.Int", e.Modulus)
}
x.Modulus = &Field{modulus}
x.Value, ok = new(big.Int).SetString(e.Value, 10)
if !ok {
return fmt.Errorf("failed to unmarshal value string '%v' to big.Int", e.Value)
}
return nil
}
// The probability of returning true for a randomly chosen
// non-prime is at most ¼ⁿ. 64 is a widely used standard
// that is more than sufficient.
const millerRabinRounds = 64
// NewField is a constructor for a Field.
func NewField(modulus *big.Int) *Field {
// For our purposes we never expect to be dealing with a non-prime field. This provides some protection against
// accidentally doing that.
if !modulus.ProbablyPrime(millerRabinRounds) {
panic(fmt.Sprintf("modulus: %x is not a prime", modulus))
}
return &Field{modulus}
}
func newElement(field *Field, value *big.Int) *Element {
if !field.IsValid(value) {
panic(fmt.Sprintf("value: %x is not within field: %x", value, field))
}
return &Element{field, value}
}
// IsValid returns whether or not the value is within [0, modulus)
func (f Field) IsValid(value *big.Int) bool {
// value < modulus && value >= 0
return value.Cmp(f.Int) < 0 && value.Sign() >= 0
}
func (f Field) NewElement(value *big.Int) *Element {
return newElement(&f, value)
}
func (f Field) Zero() *Element {
return newElement(&f, big.NewInt(0))
}
func (f Field) One() *Element {
return newElement(&f, big.NewInt(1))
}
func (f Field) RandomElement(r io.Reader) (*Element, error) {
if r == nil {
r = rand.Reader
}
var randInt *big.Int
var err error
// Ed25519 needs to do special handling
// in case the value is used in
// Scalar multiplications with points
if f.Int.Cmp(Ed25519Order()) == 0 {
scalar := NewEd25519Scalar()
randInt, err = scalar.RandomWithReader(r)
} else {
// Read a random integer within the field. This is defined as [0, max) so we don't need to
// explicitly check it is within the field. If it is not, NewElement will panic anyways.
randInt, err = rand.Int(r, f.Int)
}
if err != nil {
return nil, err
}
return newElement(&f, randInt), nil
}
// ElementFromBytes initializes a new field element from big-endian bytes
func (f Field) ElementFromBytes(bytes []byte) *Element {
return newElement(&f, new(big.Int).SetBytes(bytes))
}
// ReducedElementFromBytes initializes a new field element from big-endian bytes and reduces it by
// the modulus of the field.
//
// WARNING: If this is used with cryptographic constructions which rely on a uniform distribution of
// values, this may introduce a bias to the value of the returned field element. This happens when
// the integer range of the provided bytes is not an integer multiple of the field order.
//
// Assume we are working in a field with a modulus of 3 and the range of the uniform random bytes we
// provide as input is 5. Thus, the set of field elements is {0, 1, 2} and the set of integer values
// for the input bytes is: {0, 1, 2, 3, 4}. What is the distribution of the output values produced
// by this function?
//
// ReducedElementFromBytes(0) => 0
// ReducedElementFromBytes(1) => 1
// ReducedElementFromBytes(2) => 2
// ReducedElementFromBytes(3) => 0
// ReducedElementFromBytes(4) => 1
//
// For a value space V and random value v, a uniform distribution is defined as P[V = v] = 1/|V|
// where |V| is the order of the field. Using the results from above, we see that P[v = 0] = 2/5,
// P[v = 1] = 2/5, and P[v = 2] = 1/5. For a uniform distribution we would expect these to each be
// equal to 1/3. As they do not, this does not return uniform output for that example.
//
// To see why this is okay if the range is a multiple of the field order, change the input range to
// 6 and notice that now each output has a probability of 2/6 = 1/3, and the output is uniform.
func (f Field) ReducedElementFromBytes(bytes []byte) *Element {
value := new(big.Int).SetBytes(bytes)
value.Mod(value, f.Int)
return newElement(&f, value)
}
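// Illustrative sketch (not part of the original file): the bias described above,
// made concrete with a field of modulus 3 (prime, so NewField accepts it).
//
//	f := NewField(big.NewInt(3))
//	f.ReducedElementFromBytes([]byte{0x03}).BigInt() // 3 mod 3 = 0
//	f.ReducedElementFromBytes([]byte{0x04}).BigInt() // 4 mod 3 = 1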
func (x Element) Field() *Field {
return x.Modulus
}
// Add returns the sum x+y
func (x Element) Add(y *Element) *Element {
x.validateFields(y)
sum := new(big.Int).Add(x.Value, y.Value)
sum.Mod(sum, x.Modulus.Int)
return newElement(x.Modulus, sum)
}
// Sub returns the difference x-y
func (x Element) Sub(y *Element) *Element {
x.validateFields(y)
difference := new(big.Int).Sub(x.Value, y.Value)
difference.Mod(difference, x.Modulus.Int)
return newElement(x.Modulus, difference)
}
// Neg returns the field negation
func (x Element) Neg() *Element {
z := new(big.Int).Neg(x.Value)
z.Mod(z, x.Modulus.Int)
return newElement(x.Modulus, z)
}
// Mul returns the product x*y
func (x Element) Mul(y *Element) *Element {
x.validateFields(y)
product := new(big.Int).Mul(x.Value, y.Value)
product.Mod(product, x.Modulus.Int)
return newElement(x.Modulus, product)
}
// Div returns the quotient x/y
func (x Element) Div(y *Element) *Element {
x.validateFields(y)
yInv := new(big.Int).ModInverse(y.Value, x.Modulus.Int)
quotient := new(big.Int).Mul(x.Value, yInv)
quotient.Mod(quotient, x.Modulus.Int)
return newElement(x.Modulus, quotient)
}
// Pow computes x^y reduced by the modulus
func (x Element) Pow(y *Element) *Element {
x.validateFields(y)
return newElement(x.Modulus, new(big.Int).Exp(x.Value, y.Value, x.Modulus.Int))
}
func (x Element) Invert() *Element {
return newElement(x.Modulus, new(big.Int).ModInverse(x.Value, x.Modulus.Int))
}
func (x Element) Sqrt() *Element {
return newElement(x.Modulus, new(big.Int).ModSqrt(x.Value, x.Modulus.Int))
}
// BigInt returns value as a big.Int
func (x Element) BigInt() *big.Int {
return x.Value
}
// Bytes returns the value as bytes
func (x Element) Bytes() []byte {
return x.BigInt().Bytes()
}
// IsEqual returns x == y
func (x Element) IsEqual(y *Element) bool {
if !x.isEqualFields(y) {
return false
}
return x.Value.Cmp(y.Value) == 0
}
// Clone returns a new copy of the element
func (x Element) Clone() *Element {
return x.Modulus.ElementFromBytes(x.Bytes())
}
func (x Element) isEqualFields(y *Element) bool {
return x.Modulus.Int.Cmp(y.Modulus.Int) == 0
}
func (x Element) validateFields(y *Element) {
if !x.isEqualFields(y) {
panic("fields must match for valid binary operation")
}
}
// Ed25519Order returns the order of the Ed25519 base point (the prime subgroup order).
func Ed25519Order() *big.Int {
ed25519SubGroupOrderOnce.Do(func() {
order, ok := new(big.Int).SetString(
"1000000000000000000000000000000014DEF9DEA2F79CD65812631A5CF5D3ED",
16,
)
if !ok {
panic("invalid hex string provided. This should never happen as it is constant.")
}
ed25519SubGroupOrder = order
})
return ed25519SubGroupOrder
}

301
crypto/core/curves/field_test.go Executable file

@ -0,0 +1,301 @@
//
// Copyright Coinbase, Inc. All Rights Reserved.
//
// SPDX-License-Identifier: Apache-2.0
//
package curves
import (
"encoding/json"
"errors"
"fmt"
"math/big"
"testing"
"github.com/stretchr/testify/require"
)
var (
one = big.NewInt(1)
modulus, modulusOk = new(big.Int).SetString(
"1000000000000000000000000000000014DEF9DEA2F79CD65812631A5CF5D3ED",
16,
)
oneBelowModulus = zero().Sub(modulus, one)
oneAboveModulus = zero().Add(modulus, one)
field25519 = NewField(modulus)
)
type buggedReader struct{}
func (r buggedReader) Read(p []byte) (n int, err error) {
return 0, errors.New("EOF")
}
func zero() *big.Int {
return new(big.Int)
}
func assertElementZero(t *testing.T, e *Element) {
require.Equal(t, zero().Bytes(), e.Bytes())
}
type binaryOperation func(*Element) *Element
func assertUnequalFieldsPanic(t *testing.T, b binaryOperation) {
altField := NewField(big.NewInt(23))
altElement := altField.NewElement(one)
require.PanicsWithValue(
t,
"fields must match for valid binary operation",
func() { b(altElement) },
)
}
func TestFieldModulus(t *testing.T) {
require.True(t, modulusOk)
}
func TestNewField(t *testing.T) {
require.PanicsWithValue(
t,
fmt.Sprintf("modulus: %x is not a prime", oneBelowModulus),
func() { NewField(oneBelowModulus) },
)
require.NotPanics(
t,
func() { NewField(modulus) },
)
}
func TestNewElement(t *testing.T) {
require.PanicsWithValue(
t,
fmt.Sprintf("value: %x is not within field: %x", modulus, field25519.Int),
func() { newElement(field25519, modulus) },
)
require.NotPanics(
t,
func() { newElement(field25519, oneBelowModulus) },
)
}
func TestElementIsValid(t *testing.T) {
require.False(t, field25519.IsValid(zero().Neg(one)))
require.False(t, field25519.IsValid(modulus))
require.False(t, field25519.IsValid(oneAboveModulus))
require.True(t, field25519.IsValid(oneBelowModulus))
}
func TestFieldNewElement(t *testing.T) {
element := field25519.NewElement(oneBelowModulus)
require.Equal(t, oneBelowModulus, element.Value)
require.Equal(t, field25519, element.Field())
}
func TestZeroElement(t *testing.T) {
require.Equal(t, zero(), field25519.Zero().Value)
require.Equal(t, field25519, field25519.Zero().Field())
}
func TestOneElement(t *testing.T) {
require.Equal(t, field25519.One().Value, one)
require.Equal(t, field25519.One().Field(), field25519)
}
func TestRandomElement(t *testing.T) {
randomElement1, err := field25519.RandomElement(nil)
require.NoError(t, err)
randomElement2, err := field25519.RandomElement(nil)
require.NoError(t, err)
randomElement3, err := field25519.RandomElement(new(buggedReader))
require.Error(t, err)
require.Equal(t, field25519, randomElement1.Field())
require.Equal(t, field25519, randomElement2.Field())
require.NotEqual(t, randomElement1.Value, randomElement2.Value)
require.Nil(t, randomElement3)
}
func TestElementFromBytes(t *testing.T) {
element := field25519.ElementFromBytes(oneBelowModulus.Bytes())
require.Equal(t, field25519, element.Field())
require.Equal(t, oneBelowModulus, element.Value)
}
func TestReducedElementFromBytes(t *testing.T) {
element := field25519.ReducedElementFromBytes(oneBelowModulus.Bytes())
require.Equal(t, field25519, element.Field())
require.Equal(t, oneBelowModulus, element.Value)
element = field25519.ReducedElementFromBytes(oneAboveModulus.Bytes())
require.Equal(t, field25519, element.Field())
require.Equal(t, one, element.Value)
}
func TestAddElement(t *testing.T) {
element1 := field25519.NewElement(one)
element2 := field25519.NewElement(big.NewInt(2))
element3 := field25519.NewElement(oneBelowModulus)
element4 := &Element{field25519, modulus}
require.Equal(t, element2, element1.Add(element1))
require.Equal(t, big.NewInt(3), element1.Add(element2).Value)
require.Equal(t, big.NewInt(3), element2.Add(element1).Value)
require.Equal(t, one, element1.Add(element4).Value)
require.Equal(t, one, element3.Add(element2).Value)
assertElementZero(t, element1.Add(element3))
assertUnequalFieldsPanic(t, element1.Add)
}
func TestSubElement(t *testing.T) {
element1 := field25519.NewElement(one)
element2 := field25519.NewElement(big.NewInt(2))
element3 := field25519.NewElement(oneBelowModulus)
element4 := &Element{field25519, modulus}
assertElementZero(t, element1.Sub(element1))
require.Equal(t, element3, element1.Sub(element2))
require.Equal(t, element1, element2.Sub(element1))
require.Equal(t, element1, element1.Sub(element4))
require.Equal(t, element3, element4.Sub(element1))
require.Equal(t, element1, element4.Sub(element3))
require.Equal(t, element3, element3.Sub(element4))
assertUnequalFieldsPanic(t, element1.Sub)
}
func TestMulElement(t *testing.T) {
element1 := field25519.NewElement(one)
element2 := field25519.NewElement(big.NewInt(2))
element3 := field25519.NewElement(oneBelowModulus)
element4 := field25519.NewElement(zero())
expectedProduct, ok := new(big.Int).SetString(
"1000000000000000000000000000000014def9dea2f79cd65812631a5cf5d3eb",
16,
)
require.True(t, ok)
assertElementZero(t, element1.Mul(element4))
assertElementZero(t, element4.Mul(element1))
require.Equal(t, element3, element1.Mul(element3))
require.Equal(t, element3, element3.Mul(element1))
require.Equal(t, expectedProduct, element3.Mul(element2).Value)
require.Equal(t, expectedProduct, element2.Mul(element3).Value)
assertUnequalFieldsPanic(t, element1.Mul)
}
func TestDivElement(t *testing.T) {
element1 := field25519.NewElement(one)
element2 := field25519.NewElement(big.NewInt(2))
element3 := field25519.NewElement(oneBelowModulus)
element4 := field25519.NewElement(zero())
expectedQuotient1, ok := new(big.Int).SetString(
"80000000000000000000000000000000a6f7cef517bce6b2c09318d2e7ae9f6",
16,
)
require.True(t, ok)
expectedQuotient2, ok := new(big.Int).SetString(
"1000000000000000000000000000000014def9dea2f79cd65812631a5cf5d3eb",
16,
)
require.True(t, ok)
assertElementZero(t, element4.Div(element3))
require.Equal(t, element3, element3.Div(element1))
require.Equal(t, expectedQuotient1, element3.Div(element2).Value)
require.Equal(t, expectedQuotient2, element2.Div(element3).Value)
require.Panics(t, func() { element3.Div(element4) })
assertUnequalFieldsPanic(t, element1.Div)
}
func TestIsEqualElement(t *testing.T) {
element1 := field25519.NewElement(oneBelowModulus)
element2 := field25519.NewElement(big.NewInt(23))
element3 := field25519.NewElement(oneBelowModulus)
altField := NewField(big.NewInt(23))
altElement1 := altField.NewElement(one)
require.False(t, element1.IsEqual(element2))
require.True(t, element1.IsEqual(element3))
require.True(t, element1.IsEqual(element1))
require.False(t, element1.IsEqual(altElement1))
}
func TestBigIntElement(t *testing.T) {
element := field25519.NewElement(oneBelowModulus)
require.Equal(t, oneBelowModulus, element.BigInt())
}
func TestBytesElement(t *testing.T) {
element := field25519.NewElement(oneBelowModulus)
require.Equal(
t,
[]byte{
0x10, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x14, 0xde, 0xf9, 0xde, 0xa2,
0xf7, 0x9c, 0xd6, 0x58, 0x12, 0x63, 0x1a, 0x5c, 0xf5,
0xd3, 0xec,
},
element.Bytes(),
)
}
func TestCloneElement(t *testing.T) {
element := field25519.NewElement(oneBelowModulus)
clone := element.Clone()
require.Equal(t, clone, element)
clone.Value.Add(one, one)
require.NotEqual(t, clone, element)
}
// Tests un/marshaling Element
func TestElementMarshalJsonRoundTrip(t *testing.T) {
reallyBigInt1, ok := new(big.Int).SetString("12365234878725472538962348629568356835892346729834725643857832", 10)
require.True(t, ok)
reallyBigInt2, ok := new(big.Int).SetString("123652348787DEF9DEA2F79CD65812631A5CF5D3ED46729834725643857832", 16)
require.True(t, ok)
ins := []*Element{
newElement(field25519, big.NewInt(300)),
newElement(field25519, big.NewInt(300000)),
newElement(field25519, big.NewInt(12812798)),
newElement(field25519, big.NewInt(17)),
newElement(field25519, big.NewInt(5066680)),
newElement(field25519, big.NewInt(3005)),
newElement(field25519, big.NewInt(317)),
newElement(field25519, big.NewInt(323)),
newElement(field25519, reallyBigInt1),
newElement(field25519, reallyBigInt2),
newElement(field25519, oneBelowModulus),
}
// Run all the tests!
for _, in := range ins {
bytes, err := json.Marshal(in)
require.NoError(t, err)
require.NotNil(t, bytes)
// Unmarshal and test
out := &Element{}
err = json.Unmarshal(bytes, &out)
require.NoError(t, err)
require.NotNil(t, out)
require.NotNil(t, out.Modulus)
require.NotNil(t, out.Value)
require.Equal(t, in.Modulus.Bytes(), out.Modulus.Bytes())
require.Equal(t, in.Value.Bytes(), out.Value.Bytes())
}
}

Some files were not shown because too many files have changed in this diff