Creating a Volume on the Lightbits Storage Server
To create a volume on the cluster, log into any of the Lightbits cluster servers and enter the lbcli create volume command.
Sample Command
$ lbcli create volume --size="200 GiB" --name=1vol --acl="acl3" --compression=true --replica-count=2 --project-name=default
Sample Output
root@rack03-server72:~ lbcli create volume --size="200 GiB" --name=1vol --acl="acl3" --compression=true --replica-count=2 --project-name=default
Name  UUID                                  State     Protection State  NSID  Size     Replicas  Compression  ACL
1vol  c074cf03-83fb-4c15-a601-5444e798f51f  Creating  Unknown           0     200 GiB  2         true         values:"acl3"
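The volume is initially reported in the Creating state; once creation completes, the state changes to Available (as shown in the volume details below). As a minimal sketch, assuming the jq utility is installed on the server, you can poll the volume state until it becomes Available:

$ VOL_UUID=c074cf03-83fb-4c15-a601-5444e798f51f   # UUID reported by lbcli create volume
$ until [ "$(lbcli get volume --uuid=$VOL_UUID --project-name=default -o json | jq -r .state)" = "Available" ]; do sleep 2; done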
To delete a volume, run:
$ lbcli delete volume --name=$VOL_NAME --project-name=default
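For example, to delete the volume created above:

$ lbcli delete volume --name=1vol --project-name=default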
List Volume Details
$ lbcli get volume --uuid=c074cf03-83fb-4c15-a601-5444e798f51f --project-name=default -o json
root@rack03-server72:~ lbcli get volume --uuid=c074cf03-83fb-4c15-a601-5444e798f51f --project-name=default -o json
{
  "state": "Available",
  "protectionState": "FullyProtected",
  "replicaCount": 2,
  "nodeList": [
    "cfbfdc90-43b3-5ce6-be99-1ab4171785b0",
    "3b7a262a-ba3f-5447-9c21-e3c8ed699f0f"
  ],
  "UUID": "c074cf03-83fb-4c15-a601-5444e798f51f",
  "nsid": 4,
  "acl": {
    "values": [
      "acl3"
    ]
  },
  "compression": "true",
  "size": "214748364800",
  "name": "1vol",
  "rebuildProgress": "None",
  "statistics": {
    "logicalUsedStorage": "0",
    "physicalUsedStorage": "0",
    "compressionRatio": 1,
    "totalCompressionRatio": 1,
    "physicalCapacity": "0",
    "physicalOwnedCapacity": "0",
    "physicalOwnedMemory": "0",
    "physicalMemory": "0",
    "userWritten": "0"
  },
  "IPAcl": {
    "values": [
      "ALLOW_ANY"
    ]
  },
  "ETag": "1",
  "connectedHosts": [
    "acl3: 172.16.231.78"
  ],
  "sectorSize": 4096,
  "projectName": "default",
  "sourceSnapshotUUID": "",
  "sourceSnapshotName": "",
  "placementRestrictions": []
}
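Because the output is JSON, it is easy to script against. For instance, a quick check of the fields most relevant to volume health (a sketch that assumes jq is available on the server):

$ lbcli get volume --uuid=c074cf03-83fb-4c15-a601-5444e798f51f --project-name=default -o json | jq '{state, protectionState, replicaCount, rebuildProgress, connectedHosts}'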
You can check the basic status of the Lightbits cluster with the lbcli command line, as shown in the following examples.
$ lbcli list nvme-devices
Retrieves a list of NVMe devices in the cluster. The list can be filtered to show NVMe SSDs that exist on a specific server, are managed by a specific node, or are unmanaged.
root@rack03-server72:~ lbcli list nvme-devices
Name      Size     NUMA ID  Serial              State    Server UUID                           Node UUID
nvme2n1   932 GiB  0        PHLF736500GM1P0GGN  Healthy  20afd09a-221d-5c87-868e-a64315bfbf57  8b980170-f941-5473-9162-36a17e866a36
nvme0n1   932 GiB  0        PHLF736500H81P0GGN  Healthy  20afd09a-221d-5c87-868e-a64315bfbf57  8b980170-f941-5473-9162-36a17e866a36
nvme4n1   932 GiB  1        PHLF736500JL1P0GGN  Healthy  20afd09a-221d-5c87-868e-a64315bfbf57  8b980170-f941-5473-9162-36a17e866a36
nvme5n1   932 GiB  1        PHLF736500LN1P0GGN  Healthy  20afd09a-221d-5c87-868e-a64315bfbf57  8b980170-f941-5473-9162-36a17e866a36
nvme3n1   932 GiB  0        PHLF736500LY1P0GGN  Healthy  20afd09a-221d-5c87-868e-a64315bfbf57  8b980170-f941-5473-9162-36a17e866a36
nvme6n1   932 GiB  1        PHLF736500N41P0GGN  Healthy  20afd09a-221d-5c87-868e-a64315bfbf57  8b980170-f941-5473-9162-36a17e866a36
nvme5n1   932 GiB  0        PHLF727500A91P0GGN  Healthy  8c0ae9ab-0327-5978-8ce0-3f63029809cb  cfbfdc90-43b3-5ce6-be99-1ab4171785b0
nvme10n1  932 GiB  0        PHLF727500P21P0GGN  Healthy  8c0ae9ab-0327-5978-8ce0-3f63029809cb  cfbfdc90-43b3-5ce6-be99-1ab4171785b0
nvme0n1   932 GiB  0        PHLF727500QC1P0GGN  Healthy  8c0ae9ab-0327-5978-8ce0-3f63029809cb  cfbfdc90-43b3-5ce6-be99-1ab4171785b0
nvme11n1  932 GiB  0        PHLF727500RN1P0GGN  Healthy  8c0ae9ab-0327-5978-8ce0-3f63029809cb  cfbfdc90-43b3-5ce6-be99-1ab4171785b0
nvme2n1   931 GiB  0        PHLF7284013X1P0GGN  Healthy  8c0ae9ab-0327-5978-8ce0-3f63029809cb  cfbfdc90-43b3-5ce6-be99-1ab4171785b0
nvme7n1   932 GiB  0        PHLF728500V31P0GGN  Healthy  8c0ae9ab-0327-5978-8ce0-3f63029809cb  cfbfdc90-43b3-5ce6-be99-1ab4171785b0
nvme0n1   14 TiB   0        BTLL850602H815PDGN  Healthy  bc7e65cd-984c-5abb-ae3f-a51a49dab2cb  3b7a262a-ba3f-5447-9c21-e3c8ed699f0f
nvme15n1  932 GiB  1        PHLF8091006C1P0GGN  Healthy  bc7e65cd-984c-5abb-ae3f-a51a49dab2cb  3b7a262a-ba3f-5447-9c21-e3c8ed699f0f
nvme10n1  932 GiB  1        PHLF8091007H1P0GGN  Healthy  bc7e65cd-984c-5abb-ae3f-a51a49dab2cb  3b7a262a-ba3f-5447-9c21-e3c8ed699f0f
nvme9n1   932 GiB  1        PHLF8091007R1P0GGN  Healthy  bc7e65cd-984c-5abb-ae3f-a51a49dab2cb  3b7a262a-ba3f-5447-9c21-e3c8ed699f0f
nvme13n1  932 GiB  1        PHLF809100ZW1P0GGN  Healthy  bc7e65cd-984c-5abb-ae3f-a51a49dab2cb  3b7a262a-ba3f-5447-9c21-e3c8ed699f0f
nvme12n1  932 GiB  1        PHLF8091016Y1P0GGN  Healthy  bc7e65cd-984c-5abb-ae3f-a51a49dab2cb  3b7a262a-ba3f-5447-9c21-e3c8ed699f0f
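Because each row includes the owning server and node UUIDs, standard shell tools can also narrow the output. For example, to show only the devices attached to one of the servers in the sample output above (substitute your own server UUID):

$ lbcli list nvme-devices | grep 8c0ae9ab-0327-5978-8ce0-3f63029809cb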
$ lbcli list nodes
Retrieves a list of the nodes in the cluster and information about each node.
root@rack03-server72:~ lbcli list nodes
Name        UUID                                  State   NVMe endpoint       Failure domains  Local rebuild progress
server02-0  3b7a262a-ba3f-5447-9c21-e3c8ed699f0f  Active  172.16.231.72:4420  [server02]       None
server00-0  8b980170-f941-5473-9162-36a17e866a36  Active  172.16.231.70:4420  [server00]       None
server01-0  cfbfdc90-43b3-5ce6-be99-1ab4171785b0  Active  172.16.231.71:4420  [server01]       None
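For a quick scripted health check, confirm that every node reports the Active state. A minimal sketch using standard shell tools, which prints the name and state of any node that is not Active (no output means all nodes are Active):

$ lbcli list nodes | awk 'NR>1 && $3 != "Active" {print $1, $3}'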