From 8034fbdd1091be38d1372125df12778a5b8b5000 Mon Sep 17 00:00:00 2001 From: Poornima G Date: Tue, 25 Dec 2018 15:23:15 +0530 Subject: [PATCH] Add loopback block provider Signed-off-by: Poornima G --- Gopkg.lock | 38 +++ Gopkg.toml | 4 + glusterd2/volume/fs_utils.go | 9 +- .../gluster-loopback/glusterloopback.go | 257 ++++++++++++++++++ plugins/blockvolume/init.go | 1 + 5 files changed, 305 insertions(+), 4 deletions(-) create mode 100644 plugins/blockvolume/blockprovider/gluster-loopback/glusterloopback.go diff --git a/Gopkg.lock b/Gopkg.lock index 9f776149b..bfe4ec92a 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -735,6 +735,43 @@ revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183" version = "v2.2.1" +[[projects]] + branch = "master" + digest = "1:f5fb6c4c9e14909a66efe7d3e49403234cd52523eb3997c5753a880635e8be2d" + name = "k8s.io/apimachinery" + packages = ["pkg/util/sets"] + pruneopts = "NUT" + revision = "fa6ddc151d63306b3540a37d910a07b181e4a474" + +[[projects]] + digest = "1:9cc257b3c9ff6a0158c9c661ab6eebda1fe8a4a4453cd5c4044dc9a2ebfb992b" + name = "k8s.io/klog" + packages = ["."] + pruneopts = "NUT" + revision = "a5bc97fbc634d635061f3146511332c7e313a55a" + version = "v0.1.0" + +[[projects]] + digest = "1:d230959a8578b5604f996833fb8e0c643d14700e1b9d72033526c6a3bed6464c" + name = "k8s.io/kubernetes" + packages = [ + "pkg/util/file", + "pkg/util/io", + "pkg/util/mount", + "pkg/util/nsenter", + ] + pruneopts = "NUT" + revision = "cff46ab41ff0bb44d8584413b598ad8360ec1def" + version = "v1.13.2" + +[[projects]] + branch = "master" + digest = "1:381323c2fe2e890a3dd3b5d6dc6f2199068408cca89b24f6b7ca1c60f32644a5" + name = "k8s.io/utils" + packages = ["exec"] + pruneopts = "NUT" + revision = "8a16e7dd8fb6d97d1331b0c79a16722f934b00b1" + [solve-meta] analyzer-name = "dep" analyzer-version = 1 @@ -779,6 +816,7 @@ "golang.org/x/sys/unix", "google.golang.org/grpc", "google.golang.org/grpc/codes", + "k8s.io/kubernetes/pkg/util/mount", ] solver-name = "gps-cdcl" solver-version = 
1 diff --git a/Gopkg.toml b/Gopkg.toml index ee2fe1b74..d87a0a036 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -94,6 +94,10 @@ name = "github.com/godbus/dbus" version = "4.1.0" +[[constraint]] + name = "k8s.io/kubernetes" + version = "v1.13.0" + [prune] go-tests = true non-go = true diff --git a/glusterd2/volume/fs_utils.go b/glusterd2/volume/fs_utils.go index fec788bc9..69916725e 100644 --- a/glusterd2/volume/fs_utils.go +++ b/glusterd2/volume/fs_utils.go @@ -48,7 +48,8 @@ func createSizeInfo(fstat *syscall.Statfs_t) *SizeInfo { const fuseSuperMagic = 1702057286 -func mountVolume(name string, mountpoint string) error { +//MountVolume mounts the gluster volume on a given mount point +func MountVolume(name string, mountpoint string, mntOptns string) error { // NOTE: Why do it this way ? // * Libgfapi leaks memory on unmount. // * Glusterfs volumes cannot be mounted using syscall.Mount() @@ -67,8 +68,8 @@ func mountVolume(name string, mountpoint string) error { buffer.WriteString(fmt.Sprintf(" --volfile-server-port %s", sport)) buffer.WriteString(fmt.Sprintf(" --volfile-id %s", name)) buffer.WriteString(" --log-file /dev/null") - buffer.WriteString(" --read-only ") - buffer.WriteString(mountpoint) + buffer.WriteString(mntOptns) + buffer.WriteString(" " + mountpoint) args := strings.Fields(buffer.String()) cmd := exec.Command("glusterfs", args...) 
@@ -88,7 +89,7 @@ func UsageInfo(volname string) (*SizeInfo, error) { } defer os.Remove(tempDir) - if err := mountVolume(volname, tempDir); err != nil { + if err := MountVolume(volname, tempDir, " --read-only "); err != nil { return nil, err } defer syscall.Unmount(tempDir, syscall.MNT_FORCE) diff --git a/plugins/blockvolume/blockprovider/gluster-loopback/glusterloopback.go b/plugins/blockvolume/blockprovider/gluster-loopback/glusterloopback.go new file mode 100644 index 000000000..efacb49b7 --- /dev/null +++ b/plugins/blockvolume/blockprovider/gluster-loopback/glusterloopback.go @@ -0,0 +1,257 @@ +package glusterloopback + +import ( + "context" + "errors" + "fmt" + "io/ioutil" + "os" + + "github.com/gluster/glusterd2/glusterd2/volume" + "github.com/gluster/glusterd2/pkg/utils" + "github.com/gluster/glusterd2/plugins/blockvolume/blockprovider" + blkUtils "github.com/gluster/glusterd2/plugins/blockvolume/utils" + + log "github.com/sirupsen/logrus" + config "github.com/spf13/viper" + "k8s.io/kubernetes/pkg/util/mount" +) + +const providerName = "gluster-loopback" + +var mounter = mount.New("") + +func init() { + blockprovider.RegisterBlockProvider(providerName, newGlusterLoopBlk) +} + +// GlusterLoopBlk implements block Provider interface. 
It represents a gluster-block +type GlusterLoopBlk struct { + mounts map[string]string +} + +func newGlusterLoopBlk() (blockprovider.Provider, error) { + gb := &GlusterLoopBlk{} + + gb.mounts = make(map[string]string) + + return gb, nil +} + +func mountHost(g *GlusterLoopBlk, hostVolume string) (string, error) { + hostDir := g.mounts[hostVolume] + if hostDir == "" { + hostDir = config.GetString("rundir") + "/blockvolume/" + hostVolume + notMnt, err := mounter.IsLikelyNotMountPoint(hostDir) + if err != nil { + if os.IsNotExist(err) { + err = os.MkdirAll(hostDir, os.ModeDir|os.ModePerm) + if err != nil { + return "", fmt.Errorf("failed to create mount point %+v", err) + } + notMnt = true + } else { + return "", fmt.Errorf("failed to mount block host volume %+v", err) + } + } + + if notMnt { + err = volume.MountVolume(hostVolume, hostDir, "") + if err != nil { + return "", fmt.Errorf("failed to mount block host volume %+v", err) + } + } + g.mounts[hostVolume] = hostDir + } + return hostDir, nil +} + +// CreateBlockVolume will create a gluster block volume with given name and size having `hostVolume` as hosting volume +func (g *GlusterLoopBlk) CreateBlockVolume(name string, size uint64, hostVolume string, options ...blockprovider.BlockVolOption) (blockprovider.BlockVolume, error) { + blockVolOpts := &blockprovider.BlockVolumeOptions{} + blockVolOpts.ApplyOpts(options...) 
+ logger := log.WithFields(log.Fields{ + "block_name": name, + "hostvol": hostVolume, + "requested_block_size": size, + }) + + hostDir, err := mountHost(g, hostVolume) + if err != nil { + return nil, fmt.Errorf("failed to mount block hosting volume %+v", err) + } + + blockFileName := hostDir + "/" + name + err = utils.ExecuteCommandRun("truncate", fmt.Sprintf("-s %d", size), blockFileName) //nolint: gosec + if err != nil { + return nil, fmt.Errorf("failed to truncate block file %s: %+v", blockFileName, err) + } + + err = utils.ExecuteCommandRun("mkfs.xfs", "-f", blockFileName) //nolint: gosec + if err != nil { + return nil, fmt.Errorf("failed to format block file %s: %+v", blockFileName, err) + } + + resizeFunc := func(blockHostingAvailableSize, blockSize uint64) uint64 { return blockHostingAvailableSize - blockSize } + if err = blkUtils.ResizeBlockHostingVolume(hostVolume, size, resizeFunc); err != nil { + logger.WithError(err).Error("failed in updating hostvolume _block-hosting-available-size metadata") + } + + return &BlockVolume{ + hostVolume: hostVolume, + name: name, + size: size, + }, err +} + +// DeleteBlockVolume deletes a gluster block volume of given name +func (g *GlusterLoopBlk) DeleteBlockVolume(name string, options ...blockprovider.BlockVolOption) error { + var ( + blockVolOpts = &blockprovider.BlockVolumeOptions{} + hostVol string + ) + + blockVolOpts.ApplyOpts(options...) + + // TODO: Listing all the block volumes to delete one block vol will bottleneck at scale. Possible options: + // - Let block delete carry the host volume(optionally). The caller needs to keep this info returned in create vol, and send it in delete req. 
+ // - Build a map in memory ([blockvolume]hostvolume) during init (or lazily) of provider/create of block volume blockVols := g.BlockVolumes() + + for _, blockVol := range blockVols { + if blockVol.Name() == name { + hostVol = blockVol.HostVolume() + break + } + } + + if hostVol == "" { + return errors.New("block volume not found") + } + + hostDir, err := mountHost(g, hostVol) + if err != nil { + return err + } + + blockFileName := hostDir + "/" + name + stat, err := os.Stat(blockFileName) + if err != nil { + return err + } + + err = os.Remove(blockFileName) + if err != nil { + return err + } + + size := uint64(stat.Size()) + resizeFunc := func(blockHostingAvailableSize, blockSize uint64) uint64 { return blockHostingAvailableSize + blockSize } + if err = blkUtils.ResizeBlockHostingVolume(hostVol, size, resizeFunc); err != nil { + log.WithFields(log.Fields{ + "error": err, + "size": size, + }).Error("error in resizing the block hosting volume") + } + + return err +} + +// GetBlockVolume gives info about a gluster block volume +func (g *GlusterLoopBlk) GetBlockVolume(name string) (blockprovider.BlockVolume, error) { + var ( + blockVolume blockprovider.BlockVolume + availableBlockVolumes = g.BlockVolumes() + ) + + //TODO: looping through all block volumes to get one block vol info is not scalable, fix it + for _, blockVol := range availableBlockVolumes { + if blockVol.Name() == name { + blockVolume = blockVol + break + } + } + + if blockVolume == nil { + return nil, errors.New("block volume not found") + } + + glusterBlockVol := &BlockVolume{ + name: blockVolume.Name(), + hostVolume: blockVolume.HostVolume(), + size: blockVolume.Size(), + } + + return glusterBlockVol, nil +} + +// BlockVolumes returns all available gluster block volume +func (g *GlusterLoopBlk) BlockVolumes() []blockprovider.BlockVolume { + var glusterBlockVolumes = []blockprovider.BlockVolume{} + + volumes, err := volume.GetVolumes(context.Background()) + if err != nil { + return 
glusterBlockVolumes + } + + volumes = volume.ApplyFilters(volumes, volume.BlockHosted) + + for _, hostVol := range volumes { + hostDir, err := mountHost(g, hostVol.Name) + if err != nil { + return glusterBlockVolumes + } + + dirent, err := ioutil.ReadDir(hostDir) + if err != nil { + return glusterBlockVolumes + } + + for _, blockVol := range dirent { + glusterBlockVolumes = append(glusterBlockVolumes, &BlockVolume{name: blockVol.Name(), hostVolume: hostVol.Name, size: uint64(blockVol.Size())}) + } + } + + return glusterBlockVolumes +} + +// ProviderName returns name of block provider +func (g *GlusterLoopBlk) ProviderName() string { + return providerName +} + +// BlockVolume implements blockprovider.BlockVolume interface. +// It holds information about a gluster-block volume +type BlockVolume struct { + hosts []string + hostVolume string + name string + size uint64 +} + +// HostAddresses returns host addresses of a gluster block vol +func (gv *BlockVolume) HostAddresses() []string { return gv.hosts } + +// IQN returns IQN of a gluster block vol +func (gv *BlockVolume) IQN() string { return "" } + +// Username returns username of a gluster-block vol. 
+func (gv *BlockVolume) Username() string { return "" } + +// Password returns password for a gluster block vol +func (gv *BlockVolume) Password() string { return "" } + +// HostVolume returns host vol name of gluster block +func (gv *BlockVolume) HostVolume() string { return gv.hostVolume } + +// Name returns name of gluster block vol +func (gv *BlockVolume) Name() string { return gv.name } + +// Size returns size of a gluster block vol in bytes +func (gv *BlockVolume) Size() uint64 { return gv.size } + +// ID returns Gluster Block ID +func (gv *BlockVolume) ID() string { return "" } + +// HaCount returns high availability count +func (gv *BlockVolume) HaCount() int { return 0 } diff --git a/plugins/blockvolume/init.go b/plugins/blockvolume/init.go index 3bbd19897..cd1eb0ae0 100644 --- a/plugins/blockvolume/init.go +++ b/plugins/blockvolume/init.go @@ -3,4 +3,5 @@ package blockvolume import ( // initialise all block providers _ "github.com/gluster/glusterd2/plugins/blockvolume/blockprovider/gluster-block" + _ "github.com/gluster/glusterd2/plugins/blockvolume/blockprovider/gluster-loopback" )