Mirror of https://github.com/onsonr/sonr.git (synced 2025-03-10 04:57:08 +00:00)

Fix/hway db driver (#1198)

* fix/hway-db-driver
* chore(scripts): add tx indexer and psql connection to test
* fix(scripts): make testnet setup more robust and configurable

Parent: 36191d2bd4
Commit: 9d86dad38d

Makefile (6 lines changed)
@@ -301,7 +301,10 @@ testnet-basic: setup-testnet

sh-testnet: mod-tidy
	CHAIN_ID="sonr-testnet-1" BLOCK_TIME="1000ms" CLEAN=true sh scripts/test_node.sh

.PHONY: setup-testnet set-testnet-configs testnet testnet-basic sh-testnet

dop-testnet: mod-tidy
	sh scripts/test_dop_node.sh

.PHONY: setup-testnet set-testnet-configs testnet testnet-basic sh-testnet dop-testnet

###############################################################################
###                               generation                               ###
@@ -319,7 +322,6 @@ gen-sqlc: init-env

gen-templ: init-env
	@templ generate

###############################################################################
###                             custom builds                              ###
###############################################################################
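The new dop-testnet target simply shells out to scripts/test_dop_node.sh (added later in this diff), which pulls its keys and indexer settings from Doppler. A minimal invocation sketch, assuming doppler and skate are already configured:

    # run the Doppler-backed testnet target added above
    make dop-testnet

    # equivalent to invoking the script directly
    sh scripts/test_dop_node.sh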
Taskfile.yml (82 lines changed)

@@ -1,4 +1,4 @@
version: '3'
version: "3"

vars:
  VERSION:
@@ -11,20 +11,13 @@ vars:
    sh: uname -s
  TASKS:
    sh: task -l
  DOPPLER_TOKEN:
    sh: skate get DOPPLER_NETWORK

tasks:
  default:
    cmds:
      - gh run ls -L 3
      - gum format -- "# Sonr ({{.OS}}-{{.VERSION}})" "({{.COMMIT}}) {{.ROOT_DIR}}" "### {{ .TASKS }}"
    silent: true

  clean:
    desc: Clean build artifacts
    cmds:
      - sh ./scripts/init_env.sh
      - rm -rf ./build
      - rm -rf ./dist
      - rm -rf ./static
      - gum format -- "# Sonr ({{.OS}}-{{.VERSION}})" "({{.COMMIT}}) {{.ROOT_DIR}}" "### {{ .TASKS }}"
    silent: true

  build:
@@ -38,6 +31,21 @@ tasks:
      - task: build:sonr
      - task: build:hway

  start:
    desc: Start the node
    silent: true
    cmds:
      - task: build:hway
      - task: start:darwin
      - task: start:linux

  stop:
    desc: Stop the node
    silent: true
    cmds:
      - task: stop:darwin
      - task: stop:linux

  build:motr:
    internal: true
    silent: true
@@ -62,15 +70,53 @@ tasks:
      - sudo -u postgres psql -f ./deploy/sink/db_seed.sql
      - sudo -u postgres psql -d chainindex -f ./deploy/sink/schema_indexer.sql

  reset:db:
    desc: Reset the database
    silent: true
    platforms:
      - linux
    cmd: gum confirm "Reset chainindex, highway, and matrixhs?" --default=false --affirmative "Yes" && sudo -u postgres psql -f ./deploy/sink/db_reset.sql || echo "No selected"

  init:ipfs:
    desc: Initialize the ipfs node
    silent: true
    cmds:
      - sh ./scripts/ipfs_config.sh

  start:darwin:
    internal: true
    silent: true
    platforms:
      - darwin
    cmd: make start

  start:linux:
    internal: true
    silent: true
    platforms:
      - linux
    cmd: make start-uds

  stop:darwin:
    internal: true
    silent: true
    platforms:
      - darwin
    cmd: make stop

  stop:linux:
    internal: true
    silent: true
    platforms:
      - linux
    cmds:
      - make stop-uds
      - task: reset:chainindex

  clean:
    internal: true
    cmds:
      - sh ./scripts/init_env.sh
      - rm -rf ./build
      - rm -rf ./dist
      - rm -rf ./static
    silent: true

  reset:chainindex:
    internal: true
    platforms:
      - linux
    cmd: sudo -u postgres psql -f ./deploy/sink/reset_chainindex.sql
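Taken together, the reworked start/stop tasks amount to a platform-dependent flow. The following is a rough bash sketch of what `task start` and `task stop` end up running; the make targets themselves are not changed by this diff:

    # sketch of the task graph above, not a replacement for Taskfile.yml
    task_start() {
      task build:hway
      case "$(uname -s)" in
        Darwin) make start ;;      # start:darwin
        Linux)  make start-uds ;;  # start:linux
      esac
    }

    task_stop() {
      case "$(uname -s)" in
        Darwin) make stop ;;
        Linux)  make stop-uds && \
                sudo -u postgres psql -f ./deploy/sink/reset_chainindex.sql ;;  # reset:chainindex
      esac
    }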
@@ -5,6 +5,7 @@ import (
	"log"
	"net/http"
	"os"
	"strings"

	"github.com/onsonr/sonr/pkg/common"
	"github.com/onsonr/sonr/pkg/gateway"
@@ -21,6 +22,7 @@ var (
	sonrRPCURL string // Sonr RPC URL (default localhost:26657)

	psqlHost string // PostgresSQL Host Flag
	psqlPort string // PostgresSQL Port Flag
	psqlUser string // PostgresSQL User Flag
	psqlPass string // PostgresSQL Password Flag
	psqlDB   string // PostgresSQL Database Flag
@@ -61,8 +63,9 @@ func rootCmd() *cobra.Command {
	cmd.Flags().StringVar(&sonrGrpcURL, "sonr-grpc-url", "localhost:9090", "Sonr gRPC URL")
	cmd.Flags().StringVar(&sonrRPCURL, "sonr-rpc-url", "localhost:26657", "Sonr RPC URL")
	cmd.Flags().StringVar(&psqlHost, "psql-host", "localhost", "PostgresSQL Host")
	cmd.Flags().StringVar(&psqlUser, "psql-user", "postgres", "PostgresSQL User")
	cmd.Flags().StringVar(&psqlPass, "psql-pass", "postgres", "PostgresSQL Password")
	cmd.Flags().StringVar(&psqlPort, "psql-port", "5432", "PostgresSQL Port")
	cmd.Flags().StringVar(&psqlUser, "psql-user", "highway_user", "PostgresSQL User")
	cmd.Flags().StringVar(&psqlPass, "psql-pass", "highway_password123", "PostgresSQL Password")
	cmd.Flags().StringVar(&psqlDB, "psql-db", "highway", "PostgresSQL Database")
	return cmd
}
@@ -71,5 +74,20 @@ func formatPsqlDSN() string {
	if psqlHost == "" {
		return ""
	}
	return fmt.Sprintf("host=%s user=%s password=%s dbname=%s sslmode=disable", psqlHost, psqlUser, psqlPass, psqlDB)

	host := psqlHost
	port := "5432"

	if parts := strings.Split(psqlHost, ":"); len(parts) == 2 {
		host = parts[0]
		port = parts[1]
	}

	dsn := fmt.Sprintf("host=%s port=%s user=%s password=%s dbname=%s sslmode=verify-full",
		host, port, psqlUser, psqlPass, psqlDB)

	log.Printf("Attempting to connect to PostgreSQL with DSN: host=%s port=%s user=%s dbname=%s",
		host, port, psqlUser, psqlDB) // Don't log the password

	return dsn
}
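With the new flag set, the gateway can be pointed at a non-default Postgres instance either via --psql-port or by folding the port into --psql-host, which formatPsqlDSN() now splits apart. A hedged example invocation (binary path and values are illustrative):

    # explicit port flag
    ./build/hway --psql-host localhost --psql-port 5432 \
      --psql-user highway_user --psql-pass highway_password123 --psql-db highway

    # or host:port in one flag; formatPsqlDSN splits it into host= and port=
    ./build/hway --psql-host db.internal:5433 --psql-user highway_user --psql-db highway

Note that the rebuilt DSN requests sslmode=verify-full, so the target server needs TLS configured for the connection to succeed.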
@@ -8,7 +8,7 @@ import (

	"github.com/jackc/pgx/v5"
	config "github.com/onsonr/sonr/internal/config/hway"
	"github.com/onsonr/sonr/internal/models/drivers/hwayorm"
	hwayorm "github.com/onsonr/sonr/pkg/gateway/orm"
)

// main is the entry point for the application
@@ -12,7 +12,6 @@ import (

func main() {
	rootCmd := NewRootCmd()
	rootCmd.AddCommand(newPklInitCmd())
	if err := svrcmd.Execute(rootCmd, "", app.DefaultNodeHome); err != nil {
		log.NewLogger(rootCmd.OutOrStderr()).Error("failure when running app", "err", err)
		os.Exit(1)
@@ -1,101 +0,0 @@
package main

import (
	"context"
	"log"
	"os"
	"path/filepath"

	"github.com/apple/pkl-go/pkl"
	"github.com/spf13/cobra"
)

var configDir string

func newPklInitCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "init-pkl",
		Short: "Initialize the Sonrd configuration using PKL",
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := context.Background()
			evaluator, err := pkl.NewEvaluator(ctx, pkl.PreconfiguredOptions)
			if err != nil {
				return err
			}
			defer evaluator.Close()

			appPath := formatConfigPath(cmd, "app.toml")
			configPath := formatConfigPath(cmd, "config.toml")

			// Create app.toml
			if err := createAppToml(evaluator, appPath); err != nil {
				cmd.PrintErrf("Failed to create app.toml: %v\n", err)
				return err
			}
			cmd.Printf("Successfully created %s\n", appPath)

			// Create config.toml
			if err := createConfigToml(evaluator, configPath); err != nil {
				cmd.PrintErrf("Failed to create config.toml: %v\n", err)
				return err
			}
			cmd.Printf("Successfully created %s\n", configPath)

			return nil
		},
	}
	cmd.Flags().StringVar(&configDir, "config-dir", "~/.sonr/config", "Path to where pkl files should be output")
	return cmd
}

func createAppToml(evaluator pkl.Evaluator, path string) error {
	appSource := pkl.UriSource("https://pkl.sh/sonr.chain/0.0.2/App.pkl")
	res, err := evaluator.EvaluateOutputText(context.Background(), appSource)
	if err != nil {
		return err
	}
	log.Printf("res: %s", res)
	return writeConfigFile(path, res)
}

func createConfigToml(evaluator pkl.Evaluator, path string) error {
	configSource := pkl.UriSource("https://pkl.sh/sonr.chain/0.0.2/Config.pkl")
	res, err := evaluator.EvaluateOutputText(context.Background(), configSource)
	if err != nil {
		return err
	}
	log.Printf("res: %s", res)
	return writeConfigFile(path, res)
}

func formatConfigPath(cmd *cobra.Command, fileName string) string {
	configDir := cmd.Flag("config-dir").Value.String()
	// Expand home directory if needed
	if configDir[:2] == "~/" {
		home, err := os.UserHomeDir()
		if err == nil {
			configDir = filepath.Join(home, configDir[2:])
		}
	}
	return filepath.Join(configDir, fileName)
}

func writeConfigFile(path string, content string) error {
	// Create the directory path if it doesn't exist
	dir := filepath.Dir(path)
	if err := os.MkdirAll(dir, 0o755); err != nil {
		return err
	}

	// Check if file already exists
	if _, err := os.Stat(path); err == nil {
		// File exists, create backup
		backupPath := path + ".backup"
		if err := os.Rename(path, backupPath); err != nil {
			return err
		}
	}

	// Write the new config file
	return os.WriteFile(path, []byte(content), 0o644)
}
@@ -1,202 +0,0 @@
import "https://pkl.sh/ipfs.net/0.0.1/Config.pkl"

API {
  HTTPHeaders {
    `Access-Control-Allow-Origin` = new { "*" }
  }
}

Addresses {
  API = "/ip4/127.0.0.1/tcp/5001"
  Gateway = "/ip4/127.0.0.1/tcp/8080"
  Swarm = new {
    "/ip4/0.0.0.0/tcp/4001"
    "/ip6/::/tcp/4001"
    "/ip4/0.0.0.0/udp/4001/quic"
    "/ip6/::/udp/4001/quic"
  }
}

Bootstrap {
  "/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa"
  "/dnsaddr/bootstrap.libp2p.io/p2p/QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb"
  "/dnsaddr/bootstrap.libp2p.io/p2p/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt"
  "/ip4/104.131.131.82/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ"
  "/ip4/104.131.131.82/udp/4001/quic/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ"
  "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN"
}

Datastore {
  BloomFilterSize = 0
  GCPeriod = "1h"
  HashOnRead = false
  StorageGCWatermark = 90
  StorageMax = "10GB"
  Spec = new {
    mounts = new {
      new {
        prefix = "flatfs.datastore"
        child = new {
          path = "blocks"
          shardFunc = "/repo/flatfs/shard/v1/next-to-last/2"
          type = "flatfs"
          sync = true
        }
        mountpoint = "/blocks"
        type = "measure"
      }
      new {
        prefix = "leveldb.datastore"
        child = new {
          compression = "none"
          path = "datastore"
          type = "levelds"
        }
        mountpoint = "/"
        type = "measure"
      }
    }
    type = "mount"
  }
}

Discovery {
  MDNS = new {
    Enabled = true
  }
}

Experimental {
  StrategicProviding = false
  UrlstoreEnabled = false
  AcceleratedDHTClient = false
  GraphsyncEnabled = false
  FilestoreEnabled = false
  Libp2pStreamMounting = false
  P2pHttpProxy = false
}

Gateway {
  HTTPHeaders = new {
    `Access-Control-Allow-Headers` = new {
      "X-Requested-With"
      "Range"
      "User-Agent"
    }
    `Access-Control-Allow-Methods` = new { "GET" }
    `Access-Control-Allow-Origin` = new { "*" }
  }
  NoDNSLink = false
  NoFetch = false
  PublicGateways = null
  RootRedirect = ""
  Writable = false
}

Identity {
  PrivKey = "CAESQP0FRhYf5Nvxg0wrbN+VTK7kWdgy+3AKoxU3vNH0K9FHVpXyx6/mHKyCaPjqI11YsHUW0B2ZODGROPafyS6IeWY="
  PeerID = "12D3KooWFeMr1tHFs8WAF11rKDULJbmKg9rE5aVhYJU23oC7pqjB"
}

Ipns {
  RecordLifetime = ""
  RepublishPeriod = ""
  ResolveCacheSize = 128
}

Migration {
  Keep = ""
}

Mounts {
  IPNS = "/ipns"
  FuseAllowOther = false
  IPFS = "/ipfs"
}

Peering {
  Peers = new {
    new {
      Addrs = new { "/dnsaddr/node-1.ingress.cloudflare-ipfs.com" }
      ID = "QmcFf2FH3CEgTNHeMRGhN7HNHU1EXAxoEk6EFuSyXCsvRE"
    }
    new {
      Addrs = new { "/dnsaddr/node-2.ingress.cloudflare-ipfs.com" }
      ID = "QmcFmLd5ySfk2WZuJ1mfSWLDjdmHZq7rSAua4GoeSQfs1z"
    }
    new {
      Addrs = new { "/dnsaddr/node-3.ingress.cloudflare-ipfs.com" }
      ID = "QmcfFmzSDVbwexQ9Au2pt5YEXHK5xajwgaU6PpkbLWerMa"
    }
    new {
      Addrs = new { "/dnsaddr/node-4.ingress.cloudflare-ipfs.com" }
      ID = "QmcfJeB3Js1FG7T8YaZATEiaHqNKVdQfybYYkbT1knUswx"
    }
    new {
      Addrs = new { "/dnsaddr/node-5.ingress.cloudflare-ipfs.com" }
      ID = "QmcfVvzK4tMdFmpJjEKDUoqRgP4W9FnmJoziYX5GXJJ8eZ"
    }
    new {
      Addrs = new { "/dnsaddr/node-6.ingress.cloudflare-ipfs.com" }
      ID = "QmcfZD3VKrUxyP9BbyUnZDpbqDnT7cQ4WjPP8TRLXaoE7G"
    }
    new {
      Addrs = new { "/dnsaddr/node-7.ingress.cloudflare-ipfs.com" }
      ID = "QmcfZP2LuW4jxviTeG8fi28qjnZScACb8PEgHAc17ZEri3"
    }
    new {
      Addrs = new { "/dnsaddr/node-8.ingress.cloudflare-ipfs.com" }
      ID = "QmcfgsJsMtx6qJb74akCw1M24X1zFwgGo11h1cuhwQjtJP"
    }
    new {
      Addrs = new { "/dnsaddr/node-9.ingress.cloudflare-ipfs.com" }
      ID = "Qmcfr2FC7pFzJbTSDfYaSy1J8Uuy8ccGLeLyqJCKJvTHMi"
    }
    new {
      Addrs = new { "/dnsaddr/node-10.ingress.cloudflare-ipfs.com" }
      ID = "QmcfR3V5YAtHBzxVACWCzXTt26SyEkxdwhGJ6875A8BuWx"
    }
    new {
      Addrs = new { "/dnsaddr/node-11.ingress.cloudflare-ipfs.com" }
      ID = "Qmcfuo1TM9uUiJp6dTbm915Rf1aTqm3a3dnmCdDQLHgvL5"
    }
    new {
      Addrs = new { "/dnsaddr/node-12.ingress.cloudflare-ipfs.com" }
      ID = "QmcfV2sg9zaq7UUHVCGuSvT2M2rnLBAPsiE79vVyK3Cuev"
    }
  }
}

Provider {
  Strategy = ""
}

Pubsub {
  Router = ""
  DisableSigning = false
}

Reprovider {
  Strategy = "all"
  Interval = "12h"
}

Routing {
  Methods = null
  Routers = null
  Type = "dht"
}

Swarm {
  AddrFilters = null
  ConnMgr = new {}
  DisableBandwidthMetrics = false
  DisableNatPortMap = false
  RelayClient = new {}
  ResourceMgr = new {}
  Transports = new {
    Multiplexers = new {}
    Network = new {}
    Security = new {}
  }
}
@@ -1,3 +1,76 @@
amends "https://pkl.sh/sonr.chain/0.0.2/App.pkl"
import "package://pkg.pkl-lang.org/pkl-pantry/pkl.toml@1.0.0#/toml.pkl"

`minimum-gas-prices` = "0usnr"
`query-gas-limit` = "0"
pruning = "default"
`pruning-keep-recent` = "0"
`pruning-interval` = "0"
`halt-height` = 0
`halt-time` = 0
`min-retain-blocks` = 0
`inter-block-cache` = true
`index-events` = new Listing {}
`iavl-cache-size` = 781250
`iavl-disable-fastnode` = false
`app-db-backend` = ""

telemetry = new {
  `service-name` = ""
  enabled = true
  `enable-hostname` = true
  `enable-hostname-label` = false
  `enable-service-label` = false
  `prometheus-retention-time` = 60
  `global-labels` = new Listing {
    new Listing {
      "chain_id"
      "sonr-testnet-1"
    }
  }
  `metrics-sink` = ""
  `statsd-addr` = ""
  `datadog-hostname` = ""
}

api = new {
  enable = true
  swagger = false
  address = "tcp://0.0.0.0:1317"
  `max-open-connections` = 1000
  `rpc-read-timeout` = 10
  `rpc-write-timeout` = 0
  `rpc-max-body-bytes` = 1000000
  `enabled-unsafe-cors` = false
}

grpc = new {
  enable = true
  address = "0.0.0.0:9090"
  `max-recv-msg-size` = "10485760"
  `max-send-msg-size` = "2147483647"
}

`grpc-web` = new {
  enable = true
}

`state-sync` = new {
  `snapshot-interval` = 0
  `snapshot-keep-recent` = 2
}

streaming = new {
  abci = new {
    keys = new Listing {}
    plugin = ""
    `stop-node-on-err` = true
  }
}

mempool = new {
  `max-txs` = 5000
}

output {
  renderer = new toml.Renderer {}
}
@@ -1,3 +1,143 @@
amends "https://pkl.sh/sonr.chain/0.0.2/Genesis.pkl"
import "package://pkg.pkl-lang.org/pkl-pantry/pkl.toml@1.0.0#/toml.pkl"

version = "0.38.12"

proxy_app = "tcp://127.0.0.1:26658"
moniker = read("env:MONIKER") ?? "florence"
db_backend = "goleveldb"
db_dir = "data"
log_level = "info"
log_format = "plain"

genesis_file = "config/genesis.json"
priv_validator_key_file = "config/priv_validator_key.json"
priv_validator_state_file = "data/priv_validator_state.json"
priv_validator_laddr = ""
node_key_file = "config/node_key.json"
abci = "socket"
filter_peers = false

rpc = new {
  laddr = "tcp://0.0.0.0:26657"
  cors_allowed_origins = new Listing {}
  cors_allowed_methods = new Listing {
    "HEAD"
    "GET"
    "POST"
  }
  cors_allowed_headers = new Listing {
    "Origin"
    "Accept"
    "Content-Type"
    "X-Requested-With"
    "X-Server-Time"
  }
  grpc_laddr = ""
  grpc_max_open_connections = 900
  unsafe = false
  max_open_connections = 900
  max_subscription_clients = 100
  max_subscriptions_per_client = 5
  experimental_subscription_buffer_size = 200
  experimental_websocket_write_buffer_size = 200
  experimental_close_on_slow_client = false
  timeout_broadcast_tx_commit = "10s"
  max_request_batch_size = 10
  max_body_bytes = 1000000
  max_header_bytes = 1048576
  tls_cert_file = ""
  tls_key_file = ""
  pprof_laddr = "localhost:6060"
}

p2p = new {
  laddr = "tcp://0.0.0.0:26656"
  external_address = ""
  seeds = ""
  persistent_peers = ""
  addr_book_file = "config/addrbook.json"
  addr_book_strict = true
  max_num_inbound_peers = 40
  max_num_outbound_peers = 10
  unconditional_peer_ids = ""
  persistent_peers_max_dial_period = "0s"
  flush_throttle_timeout = "100ms"
  max_packet_msg_payload_size = 1024
  send_rate = 5120000
  recv_rate = 5120000
  pex = true
  seed_mode = false
  private_peer_ids = ""
  allow_duplicate_ip = false
  handshake_timeout = "20s"
  dial_timeout = "3s"
}

mempool = new {
  type = "flood"
  recheck = true
  recheck_timeout = "1s"
  broadcast = true
  wal_dir = ""
  size = 5000
  max_txs_bytes = 1073741824
  cache_size = 10000
  `keep-invalid-txs-in-cache` = false
  max_tx_bytes = 1048576
  max_batch_bytes = 0
  experimental_max_gossip_connections_to_persistent_peers = 0
  experimental_max_gossip_connections_to_non_persistent_peers = 0
}

statesync = new {
  enable = false
  rpc_servers = ""
  trust_height = 0
  trust_hash = ""
  trust_period = "168h0m0s"
  discovery_time = "15s"
  temp_dir = ""
  chunk_request_timeout = "10s"
  chunk_fetchers = "4"
}

blocksync = new {
  version = "v0"
}

consensus = new {
  wal_file = "data/cs.wal/wal"
  timeout_propose = "3s"
  timeout_propose_delta = "500ms"
  timeout_prevote = "1s"
  timeout_prevote_delta = "500ms"
  timeout_precommit = "1s"
  timeout_precommit_delta = "500ms"
  timeout_commit = "5000ms"
  double_sign_check_height = 0
  skip_timeout_commit = false
  create_empty_blocks = true
  create_empty_blocks_interval = "0s"
  peer_gossip_sleep_duration = "100ms"
  peer_query_maj23_sleep_duration = "2s"
}

storage = new {
  discard_abci_responses = false
}

tx_index = new {
  indexer = read("env:TX_INDEX_INDEXER") ?? "kv"
  `psql-conn` = read("env:TX_INDEX_PSQL_CONN") ?? ""
}

instrumentation = new {
  prometheus = false
  prometheus_listen_addr = ":26660"
  max_open_connections = 3
  namespace = "cometbft"
}

output {
  renderer = new toml.Renderer {}
}
@@ -2,11 +2,11 @@ version: "0.6"

processes:
  sonr:
    namespace: devnet
    command: "make sh-testnet"
    namespace: testnet
    command: "make dop-testnet"

  hway:
    namespace: devnet
    namespace: testnet
    command: "./build/hway"
    depends:
      - sonr
@@ -1,19 +0,0 @@
-- Connect to a different database first (postgres) since we can't drop a database while connected to it
\c postgres;

-- Terminate all connections to the databases
SELECT pg_terminate_backend(pid)
FROM pg_stat_activity
WHERE datname IN ('chainindex', 'highway', 'matrixhs')
  AND pid <> pg_backend_pid();

-- Drop the databases if they exist
DROP DATABASE IF EXISTS chainindex;
DROP DATABASE IF EXISTS highway;
DROP DATABASE IF EXISTS matrixhs;

-- Drop the users if they exist
DROP USER IF EXISTS chainindex_user;
DROP USER IF EXISTS highway_user;
DROP USER IF EXISTS matrixhs_user;
deploy/sink/reset_chainindex.sql (new file, 78 lines)

@@ -0,0 +1,78 @@
-- Switch to postgres database first
\c postgres;

-- Terminate existing connections
SELECT pg_terminate_backend(pid)
FROM pg_stat_activity
WHERE datname = 'chainindex'
  AND pid <> pg_backend_pid();

-- Drop and recreate database and user
DROP DATABASE IF EXISTS chainindex;
DROP USER IF EXISTS chainindex_user;

CREATE USER chainindex_user WITH PASSWORD 'chainindex_password123';
CREATE DATABASE chainindex;

-- Connect to the new database
\c chainindex;

-- Create the blocks table
CREATE TABLE blocks (
  rowid BIGSERIAL PRIMARY KEY,
  height BIGINT NOT NULL,
  chain_id VARCHAR NOT NULL,
  created_at TIMESTAMPTZ NOT NULL,
  UNIQUE (height, chain_id)
);

CREATE INDEX idx_blocks_height_chain ON blocks(height, chain_id);

CREATE TABLE tx_results (
  rowid BIGSERIAL PRIMARY KEY,
  block_id BIGINT NOT NULL REFERENCES blocks(rowid),
  index INTEGER NOT NULL,
  created_at TIMESTAMPTZ NOT NULL,
  tx_hash VARCHAR NOT NULL,
  tx_result BYTEA NOT NULL,
  UNIQUE (block_id, index)
);

CREATE TABLE events (
  rowid BIGSERIAL PRIMARY KEY,
  block_id BIGINT NOT NULL REFERENCES blocks(rowid),
  tx_id BIGINT NULL REFERENCES tx_results(rowid),
  type VARCHAR NOT NULL
);

CREATE TABLE attributes (
  event_id BIGINT NOT NULL REFERENCES events(rowid),
  key VARCHAR NOT NULL,
  composite_key VARCHAR NOT NULL,
  value VARCHAR NULL,
  UNIQUE (event_id, key)
);

CREATE VIEW event_attributes AS
  SELECT block_id, tx_id, type, key, composite_key, value
  FROM events LEFT JOIN attributes ON (events.rowid = attributes.event_id);

CREATE VIEW block_events AS
  SELECT blocks.rowid as block_id, height, chain_id, type, key, composite_key, value
  FROM blocks JOIN event_attributes ON (blocks.rowid = event_attributes.block_id)
  WHERE event_attributes.tx_id IS NULL;

CREATE VIEW tx_events AS
  SELECT height, index, chain_id, type, key, composite_key, value, tx_results.created_at
  FROM blocks JOIN tx_results ON (blocks.rowid = tx_results.block_id)
  JOIN event_attributes ON (tx_results.rowid = event_attributes.tx_id)
  WHERE event_attributes.tx_id IS NOT NULL;

-- Grant all necessary privileges
GRANT ALL PRIVILEGES ON DATABASE chainindex TO chainindex_user;
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO chainindex_user;
GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public TO chainindex_user;
GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA public TO chainindex_user;
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL PRIVILEGES ON TABLES TO chainindex_user;
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL PRIVILEGES ON SEQUENCES TO chainindex_user;
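Once a node is writing to the psql indexer, the convenience views defined above give a quick way to inspect indexed data. A small sketch of how the reset script and views might be exercised locally (database name and credentials as created by this script; the query is illustrative):

    # recreate the chainindex database and schema
    sudo -u postgres psql -f ./deploy/sink/reset_chainindex.sql

    # peek at recent block-level events through the block_events view
    psql "host=localhost dbname=chainindex user=chainindex_user password=chainindex_password123" \
      -c 'SELECT height, type, key, value FROM block_events ORDER BY height DESC LIMIT 20;'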
@@ -6,7 +6,7 @@ import (
	"github.com/labstack/echo/v4"
	echomiddleware "github.com/labstack/echo/v4/middleware"
	config "github.com/onsonr/sonr/internal/config/hway"
	"github.com/onsonr/sonr/internal/models/drivers/hwayorm"
	hwayorm "github.com/onsonr/sonr/pkg/gateway/orm"
	"github.com/onsonr/sonr/pkg/common"
	"github.com/onsonr/sonr/pkg/gateway/middleware"
	"github.com/onsonr/sonr/pkg/gateway/routes"

@@ -4,7 +4,7 @@ import (
	"net/http"

	"github.com/labstack/echo/v4"
	"github.com/onsonr/sonr/internal/models/drivers/hwayorm"
	hwayorm "github.com/onsonr/sonr/pkg/gateway/orm"
)

func ListCredentials(c echo.Context, handle string) ([]*CredentialDescriptor, error) {

@@ -5,8 +5,8 @@ import (
	"github.com/medama-io/go-useragent"
	"github.com/onsonr/sonr/crypto/mpc"
	"github.com/onsonr/sonr/internal/config/hway"
	"github.com/onsonr/sonr/internal/models/drivers/hwayorm"
	"github.com/onsonr/sonr/pkg/common"
	hwayorm "github.com/onsonr/sonr/pkg/gateway/orm"
)

type GatewayContext struct {

@@ -5,7 +5,7 @@ import (

	"github.com/labstack/echo/v4"
	"github.com/onsonr/sonr/internal/context"
	repository "github.com/onsonr/sonr/internal/models/drivers/hwayorm"
	hwayorm "github.com/onsonr/sonr/pkg/gateway/orm"
)

func CheckHandleUnique(c echo.Context, handle string) bool {
@@ -24,7 +24,7 @@ func CheckHandleUnique(c echo.Context, handle string) bool {
	return true
}

func CreateProfile(c echo.Context) (*repository.Profile, error) {
func CreateProfile(c echo.Context) (*hwayorm.Profile, error) {
	ctx, ok := c.(*GatewayContext)
	if !ok {
		return nil, echo.NewHTTPError(http.StatusInternalServerError, "Profile Context not found")
@@ -33,7 +33,7 @@ func CreateProfile(c echo.Context) (*repository.Profile, error) {
	handle := c.FormValue("handle")
	origin := c.FormValue("origin")
	name := c.FormValue("name")
	profile, err := ctx.dbq.InsertProfile(bgCtx(), repository.InsertProfileParams{
	profile, err := ctx.dbq.InsertProfile(bgCtx(), hwayorm.InsertProfileParams{
		Address: address,
		Handle:  handle,
		Origin:  origin,
@@ -44,7 +44,7 @@ func CreateProfile(c echo.Context) (*repository.Profile, error) {
	}
	// Update session with profile id
	sid := GetSessionID(c)
	_, err = ctx.dbq.UpdateSessionWithProfileID(bgCtx(), repository.UpdateSessionWithProfileIDParams{
	_, err = ctx.dbq.UpdateSessionWithProfileID(bgCtx(), hwayorm.UpdateSessionWithProfileIDParams{
		ProfileID: profile.ID,
		ID:        sid,
	})
@@ -54,7 +54,7 @@ func CreateProfile(c echo.Context) (*repository.Profile, error) {
	return &profile, nil
}

func UpdateProfile(c echo.Context) (*repository.Profile, error) {
func UpdateProfile(c echo.Context) (*hwayorm.Profile, error) {
	ctx, ok := c.(*GatewayContext)
	if !ok {
		return nil, echo.NewHTTPError(http.StatusInternalServerError, "Profile Context not found")
@@ -62,7 +62,7 @@ func UpdateProfile(c echo.Context) (*repository.Profile, error) {
	address := c.FormValue("address")
	handle := c.FormValue("handle")
	name := c.FormValue("name")
	profile, err := ctx.dbq.UpdateProfile(bgCtx(), repository.UpdateProfileParams{
	profile, err := ctx.dbq.UpdateProfile(bgCtx(), hwayorm.UpdateProfileParams{
		Address: address,
		Handle:  handle,
		Name:    name,
@@ -73,7 +73,7 @@ func UpdateProfile(c echo.Context) (*repository.Profile, error) {
	return &profile, nil
}

func ReadProfile(c echo.Context) (*repository.Profile, error) {
func ReadProfile(c echo.Context) (*hwayorm.Profile, error) {
	ctx, ok := c.(*GatewayContext)
	if !ok {
		return nil, echo.NewHTTPError(http.StatusInternalServerError, "Profile Context not found")

@@ -7,7 +7,7 @@ import (
	"github.com/labstack/echo/v4"
	"github.com/medama-io/go-useragent"
	ctx "github.com/onsonr/sonr/internal/context"
	"github.com/onsonr/sonr/internal/models/drivers/hwayorm"
	hwayorm "github.com/onsonr/sonr/pkg/gateway/orm"
	"github.com/segmentio/ksuid"
)
scripts/test_dop_node.sh (new executable file, 21 lines)

@@ -0,0 +1,21 @@
#!/bin/bash

set -e

# Ensure we're in the right directory
ROOT_DIR=$(git rev-parse --show-toplevel)
cd $ROOT_DIR

DOPPLER_TOKEN=$(skate get DOPPLER_NETWORK)

ACC0=$(doppler secrets get KEY0_NAME --plain)
ACC1=$(doppler secrets get KEY1_NAME --plain)
MNEM0=$(doppler secrets get KEY0_MNEMONIC --plain)
MNEM1=$(doppler secrets get KEY1_MNEMONIC --plain)
CHAIN_ID=$(doppler secrets get CHAIN_ID --plain)
TX_INDEX_INDEXER=$(doppler secrets get TX_INDEXER --plain)
TX_INDEX_PSQL_CONN=$(doppler secrets get TX_PSQL_CONN --plain)

# Run the node setup with all variables properly exported
KEY0_NAME=$ACC0 KEY0_MNEMONIC=$MNEM0 KEY1_NAME=$ACC1 KEY1_MNEMONIC=$MNEM1 CHAIN_ID=$CHAIN_ID TX_INDEX_INDEXER=$TX_INDEX_INDEXER TX_INDEX_PSQL_CONN=$TX_INDEX_PSQL_CONN sh scripts/test_node.sh
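The script assumes a Doppler service token stored under the skate key DOPPLER_NETWORK and the listed secrets present in the active Doppler project. A rough one-time setup sketch (secret names from the script above; values are placeholders, not the project's real configuration):

    skate set DOPPLER_NETWORK "<doppler-service-token>"
    doppler secrets set KEY0_NAME=user0 KEY1_NAME=user2
    doppler secrets set KEY0_MNEMONIC="<24-word mnemonic>" KEY1_MNEMONIC="<24-word mnemonic>"
    doppler secrets set CHAIN_ID=sonr-testnet-1
    doppler secrets set TX_INDEXER=psql
    doppler secrets set TX_PSQL_CONN="postgresql://chainindex_user:chainindex_password123@localhost:5432/chainindex"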
@@ -5,9 +5,13 @@
# CHAIN_ID="local-1" HOME_DIR="~/.core" BLOCK_TIME="1000ms" CLEAN=true sh scripts/test_node.sh
# CHAIN_ID="local-2" HOME_DIR="~/.core" CLEAN=true RPC=36657 REST=2317 PROFF=6061 P2P=36656 GRPC=8090 GRPC_WEB=8091 ROSETTA=8081 BLOCK_TIME="500ms" sh scripts/test_node.sh

export KEY="user1"
export KEY2="user2"
export KEY0_NAME=${KEY0_NAME:-"user0"}
export KEY0_MNEMONIC=${KEY0_MNEMONIC:-"decorate bright ozone fork gallery riot bus exhaust worth way bone indoor calm squirrel merry zero scheme cotton until shop any excess stage laundry"}
export KEY1_NAME="user2"
export KEY1_MNEMONIC=${KEY1_MNEMONIC:-"wealth flavor believe regret funny network recall kiss grape useless pepper cram hint member few certain unveil rather brick bargain curious require crowd raise"}

export TX_INDEX_INDEXER=${TX_INDEX_INDEXER:-"kv"}
export TX_INDEX_PSQL_CONN=${TX_INDEX_PSQL_CONN:-""}
export CHAIN_ID=${CHAIN_ID:-"sonr-testnet-1"}
export MONIKER="florence"
export KEYALGO="secp256k1"
@@ -26,6 +30,8 @@ export GRPC_WEB=${GRPC_WEB:-"9091"}
export ROSETTA=${ROSETTA:-"8080"}
export BLOCK_TIME=${BLOCK_TIME:-"5s"}

ROOT_DIR=$(git rev-parse --show-toplevel)

# if which binary does not exist, exit
if [ -z `which $BINARY` ]; then
  echo "Ensure $BINARY is installed and in your PATH"
@@ -43,7 +49,6 @@ set_config() {
}
set_config

from_scratch () {
  # Fresh install on current branch
  make install
@@ -59,15 +64,14 @@ from_scratch () {
  set_config

  add_key() {
    echo "Adding key: $1"
    key=$1
    mnemonic=$2
    echo $mnemonic | BINARY keys add $key --keyring-backend $KEYRING --algo $KEYALGO --recover
  }

  # idx1efd63aw40lxf3n4mhf7dzhjkr453axur9vjt6y
  add_key $KEY "decorate bright ozone fork gallery riot bus exhaust worth way bone indoor calm squirrel merry zero scheme cotton until shop any excess stage laundry"
  # idx1hj5fveer5cjtn4wd6wstzugjfdxzl0xpecp0nd
  add_key $KEY2 "wealth flavor believe regret funny network recall kiss grape useless pepper cram hint member few certain unveil rather brick bargain curious require crowd raise"
  echo "$KEY0_MNEMONIC" | BINARY keys add $KEY0_NAME --keyring-backend $KEYRING --algo $KEYALGO --recover
  echo "$KEY1_MNEMONIC" | BINARY keys add $KEY1_NAME --keyring-backend $KEYRING --algo $KEYALGO --recover

  # chain initial setup
  BINARY init $MONIKER --chain-id $CHAIN_ID --default-denom $DENOM
@@ -106,11 +110,11 @@ from_scratch () {
  update_test_genesis '.app_state["poa"]["params"]["admins"]=["idx10d07y265gmmuvt4z0w9aw880jnsr700j9kqcfa"]'

  # Allocate genesis accounts
  BINARY genesis add-genesis-account $KEY 10000000$DENOM,900snr --keyring-backend $KEYRING
  BINARY genesis add-genesis-account $KEY2 10000000$DENOM,800snr --keyring-backend $KEYRING
  BINARY genesis add-genesis-account $KEY0_NAME 10000000$DENOM,900snr --keyring-backend $KEYRING
  BINARY genesis add-genesis-account $KEY1_NAME 10000000$DENOM,800snr --keyring-backend $KEYRING

  # Sign genesis transaction
  BINARY genesis gentx $KEY 1000000$DENOM --keyring-backend $KEYRING --chain-id $CHAIN_ID
  BINARY genesis gentx $KEY0_NAME 1000000$DENOM --keyring-backend $KEYRING --chain-id $CHAIN_ID

  BINARY genesis collect-gentxs

@@ -124,13 +128,18 @@ from_scratch () {

# check if CLEAN is not set to false
if [ "$CLEAN" != "false" ]; then
  echo "Starting from a clean state"
  from_scratch
fi

echo "Starting node..."

# Tx Index
if [ "$TX_INDEX_PSQL_CONN" != "" ]; then
  awk -v conn="$TX_INDEX_PSQL_CONN" '/^psql-conn = / {$0 = "psql-conn = \"" conn "\""} 1' $HOME_DIR/config/config.toml > temp && mv temp $HOME_DIR/config/config.toml
fi

# Opens the RPC endpoint to outside connections
sed -i 's/laddr = "tcp:\/\/127.0.0.1:26657"/c\laddr = "tcp:\/\/0.0.0.0:'$RPC'"/g' $HOME_DIR/config/config.toml
sed -i 's/cors_allowed_origins = \[\]/cors_allowed_origins = \["\*"\]/g' $HOME_DIR/config/config.toml
@@ -149,6 +158,7 @@ sed -i 's/address = "localhost:9091"/address = "0.0.0.0:'$GRPC_WEB'"/g' $HOME_DI

# Rosetta Api
sed -i 's/address = ":8080"/address = "0.0.0.0:'$ROSETTA'"/g' $HOME_DIR/config/app.toml
sed -i 's/indexer = "kv"/indexer = "'$TX_INDEX_INDEXER'"/g' $HOME_DIR/config/config.toml

# Faster blocks
sed -i 's/timeout_commit = "5s"/timeout_commit = "'$BLOCK_TIME'"/g' $HOME_DIR/config/config.toml
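With the indexer settings now injected through the environment, the plain testnet script can be pointed at the Postgres indexer without going through Doppler. A hedged example run (the connection-string format follows CometBFT's psql indexer; credentials match those created by reset_chainindex.sql):

    TX_INDEX_INDEXER="psql" \
    TX_INDEX_PSQL_CONN="postgresql://chainindex_user:chainindex_password123@localhost:5432/chainindex?sslmode=disable" \
    CHAIN_ID="sonr-testnet-1" BLOCK_TIME="1000ms" CLEAN=true \
    sh scripts/test_node.sh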