chore(deps): bump fast-xml-parser from 4.4.0 to 4.4.1 #37

Closed
15 changes: 15 additions & 0 deletions Dockerfile
@@ -46,3 +46,18 @@ FROM run-common as server
COPY --from=build-server /usr/local/bin/tpodserver /usr/local/bin/tpodserver

ENTRYPOINT ["tpodserver"]

## autoscaler: ##

FROM build-common as build-autoscaler

COPY cmd/autoscaler ./cmd/autoscaler
RUN --mount=type=cache,target=/root/.cache/go-build go build -v -o /usr/local/bin/autoscaler ./cmd/autoscaler

FROM run-common as autoscaler

COPY --from=build-autoscaler /usr/local/bin/autoscaler /usr/local/bin/autoscaler

ENTRYPOINT ["autoscaler"]


33 changes: 33 additions & 0 deletions cmd/autoscaler/main.go
@@ -0,0 +1,33 @@
package main

import (
"fmt"
"log"
"net/http"

pbcon "github.com/comrade-coop/apocryph/pkg/proto/protoconnect"
tpraft "github.com/comrade-coop/apocryph/pkg/raft"
"golang.org/x/net/http2"
"golang.org/x/net/http2/h2c"
)

func main() {
mux := http.NewServeMux()
self, err := tpraft.NewPeer(fmt.Sprintf("/ip4/0.0.0.0/tcp/%v", RAFT_P2P_PORT))
if err != nil {
log.Printf("Failed creating p2p node: %v\n", err)
return
}
log.Printf("PEER ID: %v\n", self.ID())
server := &AutoScalerServer{self: self}
server.mainLoop = setAppGatewayExample
path, handler := pbcon.NewAutoscalerServiceHandler(server)
mux.Handle(path, handler)
log.Println("Autoscaler RPC Server Started")
err = http.ListenAndServe(
":8080",
// Use h2c so we can serve HTTP/2 without TLS.
h2c.NewHandler(mux, &http2.Server{}),
)
if err != nil {
log.Fatalf("Autoscaler RPC server stopped: %v", err)
}

}
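
For context (not part of this file), the generated Connect client can exercise the TriggerNode endpoint directly; a minimal sketch, assuming the same pb/pbcon packages used in this PR and a placeholder provider address:

package main

import (
	"context"
	"log"
	"net/http"

	"connectrpc.com/connect"
	pb "github.com/comrade-coop/apocryph/pkg/proto"
	pbcon "github.com/comrade-coop/apocryph/pkg/proto/protoconnect"
)

func main() {
	// Placeholder address; in practice this is a provider's autoscaler endpoint.
	client := pbcon.NewAutoscalerServiceClient(http.DefaultClient, "http://provider.example:8080")
	resp, err := client.TriggerNode(context.Background(), connect.NewRequest(&pb.ConnectClusterRequest{}))
	if err != nil {
		log.Fatalf("trigger failed: %v", err)
	}
	log.Printf("remote peer ID: %v", resp.Msg.PeerID)
}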
215 changes: 215 additions & 0 deletions cmd/autoscaler/server.go
@@ -0,0 +1,215 @@
package main

import (
"context"
"fmt"
"log"
"net/http"
"net/url"
"strings"
"time"

"connectrpc.com/connect"
pb "github.com/comrade-coop/apocryph/pkg/proto"
pbcon "github.com/comrade-coop/apocryph/pkg/proto/protoconnect"
tpraft "github.com/comrade-coop/apocryph/pkg/raft"
"github.com/hashicorp/raft"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/multiformats/go-multiaddr"
)

const RAFT_P2P_PORT = 32500
const RAFT_PATH = ""

type AutoScalerServer struct {
pbcon.UnimplementedAutoscalerServiceHandler
node *tpraft.RaftNode
store *tpraft.KVStore
peers []string
started bool
self host.Host
nodeGateway string
mainLoop func(*AutoScalerServer)
}

func (s *AutoScalerServer) TriggerNode(c context.Context, req *connect.Request[pb.ConnectClusterRequest]) (*connect.Response[pb.TriggerNodeResponse], error) {
if !s.started {
log.Println("Node Triggered")
go s.BootstrapCluster(req)
s.started = true
}
return connect.NewResponse(&pb.TriggerNodeResponse{PeerID: s.self.ID().String()}), nil
}

func (s *AutoScalerServer) BootstrapCluster(req *connect.Request[pb.ConnectClusterRequest]) error {

peerIDs, err := s.FetchPeerIDsFromServers(req)
if err != nil {
return fmt.Errorf("Failed to fetch PeerIDs: %v", err)
}

var peers []*peer.AddrInfo
for serverAddr, peerID := range peerIDs {
addrStr := GetMultiAddr(serverAddr, peerID)
if addrStr == "" {
return fmt.Errorf("failed to parse server address: %v", serverAddr)
}

maddr, err := multiaddr.NewMultiaddr(addrStr)
if err != nil {
return fmt.Errorf("failed to parse multiaddr %v: %w", addrStr, err)
}

peerInfo, err := peer.AddrInfoFromP2pAddr(maddr)
if err != nil {
return fmt.Errorf("failed to extract peer info from %v: %w", addrStr, err)
}

peers = append(peers, peerInfo)
log.Printf("Added peer %v, ID: %s\n", peerInfo.Addrs, peerInfo.ID.String())
}

node, err := tpraft.NewRaftNode(s.self, peers, RAFT_PATH)
if err != nil {
return fmt.Errorf("failed creating Raft node: %w", err)
}

// create the KVStore on top of the Raft node
store, err := tpraft.NewKVStore(node)
if err != nil {
return fmt.Errorf("failed creating KV store: %w", err)
}
s.node = node
s.store = store
s.peers = req.Msg.Servers
s.nodeGateway = req.Msg.NodeGateway

return s.waitLeaderElection(req.Msg.Timeout)
}

func (s *AutoScalerServer) ConnectCluster(c context.Context, req *connect.Request[pb.ConnectClusterRequest]) (*connect.Response[pb.ConnectClusterResponse], error) {
log.Println("Forming a Raft Cluster with the following providers:", req.Msg.Servers)
s.started = true
err := s.BootstrapCluster(req)
if err != nil {
return connect.NewResponse(&pb.ConnectClusterResponse{
Success: false,
Error: fmt.Sprintf("failed bootstrapping cluster: %v", err),
}), nil
}
response := &pb.ConnectClusterResponse{Success: true}
return connect.NewResponse(response), nil
}

func (s *AutoScalerServer) FetchPeerIDsFromServers(req *connect.Request[pb.ConnectClusterRequest]) (map[string]string, error) {
peerIDs := make(map[string]string)

for _, addr := range req.Msg.Servers {
client := pbcon.NewAutoscalerServiceClient(
http.DefaultClient,
addr)

resp, err := client.TriggerNode(context.Background(), req)
if err != nil {
return nil, fmt.Errorf("failed to get PeerID from server %v: %w", addr, err)
}
peerIDs[addr] = resp.Msg.PeerID
}

return peerIDs, nil
}

func (s *AutoScalerServer) waitLeaderElection(timeout uint32) error {

log.Printf("Waiting for leader election with %v seoncds timout ...", timeout)
time.Sleep(5 * time.Second)

go s.watchNewStates()

// Using Raft.LeaderCh() won't help here, because it does not report the
// first leader election as a leadership change; per the docs, observing
// state changes (plus polling) is the way to detect the new leader.
obsCh := make(chan raft.Observation, 1)
observer := raft.NewObserver(obsCh, false, nil)
s.node.Raft.RegisterObserver(observer)
defer s.node.Raft.DeregisterObserver(observer)

ticker := time.NewTicker(time.Second)
defer ticker.Stop()

timeoutCh := time.After(time.Duration(timeout) * time.Second)
for {
select {
case obs := <-obsCh:
switch obs.Data.(type) {
case raft.RaftState:
if leaderAddr, _ := s.node.Raft.LeaderWithID(); leaderAddr != "" {
fmt.Printf("Leader Elected: %v\n", leaderAddr)
go s.mainLoop(s)
return nil
}
}
case <-ticker.C:
if leaderAddr, _ := s.node.Raft.LeaderWithID(); leaderAddr != "" {
fmt.Printf("Leader Elected: %v\n", leaderAddr)
go s.mainLoop(s)
return nil
}
case <-timeoutCh:
log.Println("timed out waiting for leader")
return fmt.Errorf("Timed out waiting for leadership election")
}
}
}

func (s *AutoScalerServer) watchNewStates() {
for range s.node.Consensus.Subscribe() {
newState, err := s.node.Consensus.GetCurrentState()
if err != nil {
log.Printf("Failed getting current state: %v\n", err)
continue
}
log.Printf("State changed, new state: %v\n", newState)
}
}

// example of a main loop that sets the value of a test domain to the current
// node gateway every 10 seconds
func setAppGatewayExample(s *AutoScalerServer) {
log.Println("Starting Main Loop:")
for {
if s.node.Raft.State() == raft.Leader {
log.Println("Setting the domain value to the current apocryph node gateway")
err := s.store.Set("www.test.com", "http://localhost:8080")
// leadership could be changed right before setting the value
if _, ok := err.(*tpraft.NotLeaderError); ok {
newLeaderAddr, newLeaderID := s.node.Raft.LeaderWithID()
log.Printf("Leadership changed to %v:%v", newLeaderAddr, newLeaderID)
} else if err != nil {
log.Printf("Failed setting key:%v: %v\n", "www.test.com", err)
}
}
time.Sleep(10 * time.Second)
}
}

func GetMultiAddr(addr, peerID string) string {
// Parse the URL
parsedURL, err := url.Parse(addr)
if err != nil {
fmt.Println("Error parsing URL:", err)
return ""
}
ip := parsedURL.Hostname()
if ip == "" {
log.Println("Invalid server URL: missing host")
return ""
}
// Swap the server's port for RAFT_P2P_PORT, since Raft traffic uses its own port
return fmt.Sprintf("/ip4/%s/tcp/%v/p2p/%s", ip, RAFT_P2P_PORT, peerID)
}
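
As a quick illustration of the rewrite this helper performs (values are made up):

maddr := GetMultiAddr("http://10.0.1.5:8080", "12D3KooWExamplePeer")
// maddr == "/ip4/10.0.1.5/tcp/32500/p2p/12D3KooWExamplePeer"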
2 changes: 2 additions & 0 deletions cmd/tpodserver/registry.go
@@ -199,6 +199,8 @@ var registerCmd = &cobra.Command{
},
}

// getHostInfo automatically injects the current IPFS peer ID into the
// multiaddrs field of the provided config file; the peer ID is needed for
// p2p connections over IPFS
func getHostInfo(ctx context.Context, ipfs *rpc.HttpApi) (*pb.HostInfo, error) {
if hostInfoContents == "" {
return nil, fmt.Errorf("Empty host info")
51 changes: 51 additions & 0 deletions cmd/trustedpods/autoscaler.go
@@ -0,0 +1,51 @@
package main

import (
"context"
"fmt"
"log"
"net/http"

"connectrpc.com/connect"
"github.com/spf13/cobra"

pb "github.com/comrade-coop/apocryph/pkg/proto"
pbcon "github.com/comrade-coop/apocryph/pkg/proto/protoconnect"
)

var nodesAddrs []string
var raftPath string
var appManifest string
var baseUrl string

var autoscalerCmd = &cobra.Command{
Use: "autoscale",
Short: "Configure the deployed autonomous autoscaler",
RunE: func(cmd *cobra.Command, args []string) error {
client := pbcon.NewAutoscalerServiceClient(
http.DefaultClient,
baseUrl)
log.Printf("Connecting Clusters:%v\n", nodesAddrs)
request := connect.NewRequest(&pb.ConnectClusterRequest{NodeGateway: baseUrl, Servers: nodesAddrs, Timeout: 30})
// Set the Host header, which the ingress needs in order to route requests to the autoscaler pod
request.Header().Set("Host", "autoscaler.local")
log.Printf("Sending Request to: %v\n", baseUrl)
response, err := client.ConnectCluster(context.Background(), request)
if err != nil {
return fmt.Errorf("error connecting to cluster: %w", err)
}
log.Printf("Received response: %v", response)
return nil
},
}

func init() {
// this could be removed if we store the list of providers directly in the pod configuration
autoscalerCmd.Flags().StringSliceVarP(&nodesAddrs, "providers", "p", []string{}, "List of all provider cluster IP addresses that the autoscaler will use to redeploy your application in case of failure")
autoscalerCmd.Flags().StringVar(&raftPath, "path", "", "Optional: path where Raft will save its state (default: in memory)")
autoscalerCmd.Flags().StringVar(&appManifest, "manifest", "", "Path to the application manifest to autoscale")
autoscalerCmd.Flags().StringVar(&baseUrl, "url", "", "URL of the Apocryph node gateway running the autoscaler instance")

rootCmd.AddCommand(autoscalerCmd)
}
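
For reference, the new subcommand would be invoked roughly like so (binary name and addresses are illustrative):

trustedpods autoscale \
  --url http://provider-1.example:8080 \
  --providers http://provider-1.example:8080,http://provider-2.example:8080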
2 changes: 2 additions & 0 deletions cmd/trustedpods/flags.go
@@ -58,6 +58,7 @@ var _ = func() error {
deploymentFlags.StringVar(&providerPeer, "provider", "", "provider peer id")
deploymentFlags.StringVar(&providerEthAddress, "provider-eth", "", "provider public address")
deploymentFlags.Int64Var(&expirationOffset, "token-expiration", 10, "authentication token expires after token-expiration seconds (expired after 10 seconds by default)")
deploymentFlags.StringVar(&ipfsApi, "ipfs", "/ip4/127.0.0.1/tcp/5001", "multiaddr where the ipfs/kubo api can be accessed")

uploadFlags.StringVar(&ipfsApi, "ipfs", "/ip4/127.0.0.1/tcp/5001", "multiaddr where the ipfs/kubo api can be accessed")
uploadFlags.BoolVar(&uploadImages, "upload-images", true, "upload images")
@@ -73,6 +74,7 @@ var _ = func() error {

syncFlags.AddFlag(uploadFlags.Lookup("ipfs"))

registryFlags.StringVar(&ipfsApi, "ipfs", "/ip4/127.0.0.1/tcp/5001", "multiaddr where the ipfs/kubo api can be accessed")
registryFlags.StringVar(&registryContractAddress, "registry-contract", "", "registry contract address")
registryFlags.StringVar(&tokenContractAddress, "token-contract", "", "token contract address")
registryFlags.AddFlag(fundFlags.Lookup("payment-contract"))