[WIP] feat: Rewrite plugin in Go #31

Open · wants to merge 4 commits into base: main

1 change: 1 addition & 0 deletions .gitignore
@@ -3,6 +3,7 @@
coverage/
dist/
node_modules/
go/
*~
\#*
.\#*
22 changes: 21 additions & 1 deletion .rc
@@ -27,4 +27,24 @@ function yarn() {
else
docker run ${DOCKER_RUN_OPTS} --entrypoint=yarn ${DOCKER_IMAGE} "$@"
fi
}
}

function go() {
DOCKER_IMAGE=go
DOCKER_RUN_OPTS="--rm -v ${PWD}:${PWD} -w ${PWD} -e GOPATH=${PWD}/go"
if [ -n "$ZSH_VERSION" ]; then
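# zsh does not word-split unquoted parameter expansions by default, so
# ${=DOCKER_RUN_OPTS} forces sh-style word splitting of the options string.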
docker run ${=DOCKER_RUN_OPTS} ${DOCKER_IMAGE} go "$@"
else
docker run ${DOCKER_RUN_OPTS} ${DOCKER_IMAGE} go "$@"
fi
}

function mage() {
DOCKER_IMAGE=go
DOCKER_RUN_OPTS="--rm -v ${PWD}:${PWD} -w ${PWD} -e GOPATH=${PWD}/go"
if [ -n "$ZSH_VERSION" ]; then
docker run ${=DOCKER_RUN_OPTS} ${DOCKER_IMAGE} mage "$@"
else
docker run ${DOCKER_RUN_OPTS} ${DOCKER_IMAGE} mage "$@"
fi
}
5 changes: 5 additions & 0 deletions Dockerfile.godev
@@ -0,0 +1,5 @@
FROM golang:1.18.3

RUN git clone https://github.com/magefile/mage \
&& cd mage \
&& go run bootstrap.go
File renamed without changes.
17 changes: 17 additions & 0 deletions Magefile.go
@@ -0,0 +1,17 @@
//+build mage

package main

import (
"fmt"
// mage:import
build "github.com/grafana/grafana-plugin-sdk-go/build"
)

// Hello prints a message (shows that you can define custom Mage targets).
func Hello() {
fmt.Println("hello plugin developer!")
}

// Default configures the default target.
var Default = build.BuildAll
5 changes: 4 additions & 1 deletion Makefile
@@ -7,11 +7,14 @@ all: build up
build: | build-docker build-splunk-datasource

build-docker:
docker build . -t node
docker build . -t node -f Dockerfile.node
docker build . -t go -f Dockerfile.godev

build-splunk-datasource:
yarn install
yarn build
go mod tidy
mage -v

up:
docker-compose up -d
8 changes: 8 additions & 0 deletions go.mod
@@ -0,0 +1,8 @@
module github.com/grafana/grafana-starter-datasource-backend

go 1.16

require (
github.com/grafana/grafana-plugin-sdk-go v0.136.0
github.com/magefile/mage v1.13.0 // indirect
)
659 changes: 659 additions & 0 deletions go.sum

Large diffs are not rendered by default.

24 changes: 24 additions & 0 deletions pkg/main.go
@@ -0,0 +1,24 @@
package main

import (
"os"

"github.com/grafana/grafana-plugin-sdk-go/backend/datasource"
"github.com/grafana/grafana-plugin-sdk-go/backend/log"
"github.com/grafana/grafana-starter-datasource-backend/pkg/plugin"
)

func main() {
// Start listening to requests sent from Grafana. This call is blocking, so it
// won't return until Grafana shuts down the process or the plugin chooses to
// exit on its own using os.Exit. Manage automatically manages the life cycle
// of datasource instances. It accepts a datasource instance factory as its
// first argument; the factory is called on incoming requests from Grafana to
// create a separate instance of SampleDatasource per datasource ID. When the
// datasource configuration changes, Dispose is called on the old instance and
// a new one is created with the NewSampleDatasource factory.
if err := datasource.Manage("myorgid-simple-backend-datasource", plugin.NewSampleDatasource, datasource.ManageOpts{}); err != nil {
log.DefaultLogger.Error(err.Error())
os.Exit(1)
}
}
194 changes: 194 additions & 0 deletions pkg/plugin/plugin.go
@@ -0,0 +1,194 @@
package plugin

import (
"context"
"encoding/json"
"math/rand"
"time"

"github.com/grafana/grafana-plugin-sdk-go/backend"
"github.com/grafana/grafana-plugin-sdk-go/backend/instancemgmt"
"github.com/grafana/grafana-plugin-sdk-go/backend/log"
"github.com/grafana/grafana-plugin-sdk-go/data"
"github.com/grafana/grafana-plugin-sdk-go/live"
)

// Make sure SampleDatasource implements the required interfaces. This is
// important because otherwise we would only get a "not implemented" error
// response from the plugin at runtime. In this example the datasource instance
// implements the backend.QueryDataHandler, backend.CheckHealthHandler, and
// backend.StreamHandler interfaces. A plugin does not have to implement all of
// these interfaces, only those required for the task at hand. For example, if
// the plugin does not need streaming functionality, you are free to remove the
// methods that implement backend.StreamHandler. Implementing
// instancemgmt.InstanceDisposer is useful for cleaning up resources used by the
// previous datasource instance when a new instance is created after the
// datasource settings change.
var (
_ backend.QueryDataHandler = (*SampleDatasource)(nil)
_ backend.CheckHealthHandler = (*SampleDatasource)(nil)
_ backend.StreamHandler = (*SampleDatasource)(nil)
_ instancemgmt.InstanceDisposer = (*SampleDatasource)(nil)
)

// NewSampleDatasource creates a new datasource instance.
func NewSampleDatasource(_ backend.DataSourceInstanceSettings) (instancemgmt.Instance, error) {
return &SampleDatasource{}, nil
}

// SampleDatasource is an example datasource which can respond to data queries, reports
// its health and has streaming skills.
type SampleDatasource struct{}

// Dispose tells the plugin SDK that the plugin wants to clean up resources when
// a new instance is created. As soon as the SDK detects a change to the
// datasource settings, the old datasource instance is disposed and a new one is
// created using the NewSampleDatasource factory function.
func (d *SampleDatasource) Dispose() {
// Clean up datasource instance resources.
}

// QueryData handles multiple queries and returns multiple responses.
// req contains the queries []DataQuery (where each query contains RefID as a unique identifier).
// The QueryDataResponse contains a map of RefID to the response for each query, and each response
// contains Frames ([]*Frame).
func (d *SampleDatasource) QueryData(ctx context.Context, req *backend.QueryDataRequest) (*backend.QueryDataResponse, error) {
log.DefaultLogger.Info("QueryData called", "request", req)

// create response struct
response := backend.NewQueryDataResponse()

// loop over queries and execute them individually.
for _, q := range req.Queries {
res := d.query(ctx, req.PluginContext, q)

// save the response in the Responses map,
// keyed by the query's RefID
response.Responses[q.RefID] = res
}

return response, nil
}

type queryModel struct {
WithStreaming bool `json:"withStreaming"`
}

func (d *SampleDatasource) query(_ context.Context, pCtx backend.PluginContext, query backend.DataQuery) backend.DataResponse {
response := backend.DataResponse{}

// Unmarshal the JSON into our queryModel.
var qm queryModel

response.Error = json.Unmarshal(query.JSON, &qm)
if response.Error != nil {
return response
}

// create data frame response.
frame := data.NewFrame("response")

// add fields.
frame.Fields = append(frame.Fields,
data.NewField("time", nil, []time.Time{query.TimeRange.From, query.TimeRange.To}),
data.NewField("values", nil, []int64{10, 20}),
)

// If the query was made with streaming enabled, return a channel that the
// client can subscribe to in order to consume updates from the plugin.
// Feel free to remove this if you don't need streaming for your datasource.
if qm.WithStreaming {
channel := live.Channel{
Scope: live.ScopeDatasource,
Namespace: pCtx.DataSourceInstanceSettings.UID,
Path: "stream",
}
frame.SetMeta(&data.FrameMeta{Channel: channel.String()})
}

// add the frames to the response.
response.Frames = append(response.Frames, frame)

return response
}

// CheckHealth handles health checks sent from Grafana to the plugin.
// The main use case for these health checks is the test button on the
// datasource configuration page which allows users to verify that
// a datasource is working as expected.
func (d *SampleDatasource) CheckHealth(_ context.Context, req *backend.CheckHealthRequest) (*backend.CheckHealthResult, error) {
log.DefaultLogger.Info("CheckHealth called", "request", req)

var status = backend.HealthStatusOk
var message = "Data source is working"

if rand.Int()%2 == 0 {
status = backend.HealthStatusError
message = "randomized error"
}

return &backend.CheckHealthResult{
Status: status,
Message: message,
}, nil
}

// SubscribeStream is called when a client wants to connect to a stream. This callback
// allows sending the first message.
func (d *SampleDatasource) SubscribeStream(_ context.Context, req *backend.SubscribeStreamRequest) (*backend.SubscribeStreamResponse, error) {
log.DefaultLogger.Info("SubscribeStream called", "request", req)

status := backend.SubscribeStreamStatusPermissionDenied
if req.Path == "stream" {
// Allow subscribing only on the expected path.
status = backend.SubscribeStreamStatusOK
}
return &backend.SubscribeStreamResponse{
Status: status,
}, nil
}

// RunStream is called once for any open channel. Results are shared with everyone
// subscribed to the same channel.
func (d *SampleDatasource) RunStream(ctx context.Context, req *backend.RunStreamRequest, sender *backend.StreamSender) error {
log.DefaultLogger.Info("RunStream called", "request", req)

// Create the same data frame as for query data.
frame := data.NewFrame("response")

// Add fields (matching the same schema used in QueryData).
frame.Fields = append(frame.Fields,
data.NewField("time", nil, make([]time.Time, 1)),
data.NewField("values", nil, make([]int64, 1)),
)

counter := 0

// Stream data frames periodically until the stream is closed by Grafana.
for {
select {
case <-ctx.Done():
log.DefaultLogger.Info("Context done, finish streaming", "path", req.Path)
return nil
case <-time.After(time.Second):
// Send new data periodically.
frame.Fields[0].Set(0, time.Now())
frame.Fields[1].Set(0, int64(10*(counter%2+1)))

counter++

err := sender.SendFrame(frame, data.IncludeAll)
if err != nil {
log.DefaultLogger.Error("Error sending frame", "error", err)
continue
}
}
}
}

// PublishStream is called when a client sends a message to the stream.
func (d *SampleDatasource) PublishStream(_ context.Context, req *backend.PublishStreamRequest) (*backend.PublishStreamResponse, error) {
log.DefaultLogger.Info("PublishStream called", "request", req)

// Do not allow publishing at all.
return &backend.PublishStreamResponse{
Status: backend.PublishStreamStatusPermissionDenied,
}, nil
}
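
For context on the query() helper above: the JSON it parses arrives from the frontend query editor through backend.DataQuery.JSON, and an unmarshal failure is reported through response.Error rather than a Go error. A minimal, self-contained sketch of that behaviour (the payloads below are made up for illustration):

package main

import (
    "encoding/json"
    "fmt"
)

// queryModel mirrors the struct defined in pkg/plugin/plugin.go.
type queryModel struct {
    WithStreaming bool `json:"withStreaming"`
}

func main() {
    // A well-formed payload, as the query editor might send it.
    var qm queryModel
    err := json.Unmarshal([]byte(`{"withStreaming": true}`), &qm)
    fmt.Println(qm.WithStreaming, err) // true <nil>

    // A malformed payload; query() would store this error in response.Error
    // and return early without building any frames.
    err = json.Unmarshal([]byte(`{"withStreaming": "yes"}`), &qm)
    fmt.Println(err != nil) // true
}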
30 changes: 30 additions & 0 deletions pkg/plugin/plugin_test.go
@@ -0,0 +1,30 @@
package plugin_test

import (
"context"
"testing"

"github.com/grafana/grafana-plugin-sdk-go/backend"
"github.com/grafana/grafana-starter-datasource-backend/pkg/plugin"
)

// This is where the tests for the datasource backend live.
func TestQueryData(t *testing.T) {
ds := plugin.SampleDatasource{}

resp, err := ds.QueryData(
context.Background(),
&backend.QueryDataRequest{
Queries: []backend.DataQuery{
{RefID: "A"},
},
},
)
if err != nil {
t.Error(err)
}

if len(resp.Responses) != 1 {
t.Fatal("QueryData must return a response")
}
}
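
A possible follow-up test (not part of this PR) could exercise the withStreaming branch of query through QueryData. A sketch, assuming a hypothetical UID and payload purely for illustration:

package plugin_test

import (
    "context"
    "testing"

    "github.com/grafana/grafana-plugin-sdk-go/backend"
    "github.com/grafana/grafana-starter-datasource-backend/pkg/plugin"
)

// TestQueryDataWithStreaming checks that a query carrying the withStreaming
// flag produces a response without an error.
func TestQueryDataWithStreaming(t *testing.T) {
    ds := plugin.SampleDatasource{}

    resp, err := ds.QueryData(
        context.Background(),
        &backend.QueryDataRequest{
            PluginContext: backend.PluginContext{
                // The streaming branch reads this UID to build the live channel.
                DataSourceInstanceSettings: &backend.DataSourceInstanceSettings{UID: "test-uid"},
            },
            Queries: []backend.DataQuery{
                {RefID: "A", JSON: []byte(`{"withStreaming": true}`)},
            },
        },
    )
    if err != nil {
        t.Fatal(err)
    }
    if queryResp := resp.Responses["A"]; queryResp.Error != nil {
        t.Fatal(queryResp.Error)
    }
}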