diff --git a/vultr/resource_vultr_block_storage.go b/vultr/resource_vultr_block_storage.go
index 25049585..824ca34b 100644
--- a/vultr/resource_vultr_block_storage.go
+++ b/vultr/resource_vultr_block_storage.go
@@ -2,9 +2,12 @@ package vultr
 
 import (
 	"context"
+	"fmt"
 	"log"
 	"time"
 
+	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 	"github.com/vultr/govultr/v2"
 )
@@ -33,6 +36,7 @@ func resourceVultrBlockStorage() *schema.Resource {
 			"attached_to_instance": {
 				Type:     schema.TypeString,
 				Optional: true,
+				Computed: true,
 			},
 			"label": {
 				Type:     schema.TypeString,
@@ -81,7 +85,11 @@ func resourceVultrBlockStorageCreate(ctx context.Context, d *schema.ResourceData
 	d.SetId(bs.ID)
 	log.Printf("[INFO] Block Storage ID: %s", d.Id())
 
-	if instanceID, ok := d.GetOkExists("attached_to_instance"); ok {
+	if _, err = waitForBlockAvailable(ctx, d, "active", []string{"pending"}, "status", meta); err != nil {
+		return diag.Errorf("error while waiting for block %s to be completed: %s", d.Id(), err)
+	}
+
+	if instanceID, ok := d.GetOk("attached_to_instance"); ok {
 		log.Printf("[INFO] Attaching block storage (%s)", d.Id())
 
 		// Wait for the BS state to become active for 30 seconds
@@ -198,3 +206,40 @@ func resourceVultrBlockStorageDelete(ctx context.Context, d *schema.ResourceData
 
 	return nil
 }
+
+// waitForBlockAvailable blocks until the block storage volume's given
+// attribute reaches the target state (or the timeout elapses).
+func waitForBlockAvailable(ctx context.Context, d *schema.ResourceData, target string, pending []string, attribute string, meta interface{}) (interface{}, error) {
+	log.Printf(
+		"[INFO] Waiting for Block Storage (%s) to have %s of %s",
+		d.Id(), attribute, target)
+	stateConf := &resource.StateChangeConf{
+		Pending:        pending,
+		Target:         []string{target},
+		Refresh:        newBlockStateRefresh(ctx, d, meta, attribute),
+		Timeout:        60 * time.Minute,
+		Delay:          10 * time.Second,
+		MinTimeout:     3 * time.Second,
+		NotFoundChecks: 60,
+	}
+
+	return stateConf.WaitForStateContext(ctx)
+}
+
+// newBlockStateRefresh polls the block storage volume and reports its status.
+func newBlockStateRefresh(ctx context.Context, d *schema.ResourceData, meta interface{}, attr string) resource.StateRefreshFunc {
+	client := meta.(*Client).govultrClient()
+	return func() (interface{}, string, error) {
+
+		log.Printf("[INFO] Refreshing Block Storage (%s)", d.Id())
+		block, err := client.BlockStorage.Get(ctx, d.Id())
+		if err != nil {
+			return nil, "", fmt.Errorf("error retrieving block %s : %s", d.Id(), err)
+		}
+
+		if attr == "status" {
+			log.Printf("[INFO] The Block Status is %s", block.Status)
+			return block, block.Status, nil
+		} else {
+			return nil, "", nil
+		}
+	}
+}
diff --git a/vultr/resource_vultr_instance.go b/vultr/resource_vultr_instance.go
index df8d3631..896d6392 100644
--- a/vultr/resource_vultr_instance.go
+++ b/vultr/resource_vultr_instance.go
@@ -170,6 +170,11 @@ func resourceVultrInstance() *schema.Resource {
 				},
 			},
 		},
+		"block_storage_ids": {
+			Type:     schema.TypeList,
+			Optional: true,
+			Elem:     &schema.Schema{Type: schema.TypeString},
+		},
 		// Computed
 		"os": {
 			Type: schema.TypeString,
@@ -356,6 +361,15 @@ func resourceVultrInstanceCreate(ctx context.Context, d *schema.ResourceData, me
 		}
 	}
 
+	// Attach any requested block storage volumes once the instance exists.
+	if blockStorageIDs, blockStorageIDsOk := d.GetOk("block_storage_ids"); blockStorageIDsOk {
+		for _, v := range blockStorageIDs.([]interface{}) {
+			req := &govultr.BlockStorageAttach{InstanceID: d.Id(), Live: govultr.BoolToBoolPtr(true)}
+			if err := client.BlockStorage.Attach(ctx, v.(string), req); err != nil {
+				return diag.Errorf("error attaching block storage id %s : %v", v.(string), err)
+			}
+		}
+	}
 	return resourceVultrInstanceRead(ctx, d, meta)
 }
 
@@ -513,7 +527,6 @@ func resourceVultrInstanceUpdate(ctx context.Context, d *schema.ResourceData, me
 		for _, v := range diff(newIDs, oldIDs) {
 			req.DetachPrivateNetwork = append(req.DetachPrivateNetwork, v)
 		}
-
 	}
 
 	if _, err := client.Instance.Update(ctx, d.Id(), req); err != nil {
@@ -574,6 +587,53 @@ func resourceVultrInstanceUpdate(ctx context.Context, d *schema.ResourceData, me
 		}
 	}
 
+	if d.HasChange("block_storage_ids") {
+		log.Printf("[INFO] Updating block_storage_ids")
+		oldBlock, newBlock := d.GetChange("block_storage_ids")
+
+		var oldIDs []string
+		for _, v := range oldBlock.([]interface{}) {
+			oldIDs = append(oldIDs, v.(string))
+		}
+
+		var newIDs []string
+		for _, v := range newBlock.([]interface{}) {
+			newIDs = append(newIDs, v.(string))
+		}
+
+		// diff returns the entries of out that are not present in in.
+		diff := func(in, out []string) []string {
+			var diff []string
+
+			b := map[string]string{}
+			for i := range in {
+				b[in[i]] = ""
+			}
+
+			for i := range out {
+				if _, ok := b[out[i]]; !ok {
+					diff = append(diff, out[i])
+				}
+			}
+
+			return diff
+		}
+
+		// Attach volumes that are new in the configuration.
+		for _, v := range diff(oldIDs, newIDs) {
+			req := &govultr.BlockStorageAttach{InstanceID: d.Id(), Live: govultr.BoolToBoolPtr(true)}
+			if err := client.BlockStorage.Attach(ctx, v, req); err != nil {
+				return diag.Errorf("error attaching block storage id %s : %v", v, err)
+			}
+		}
+
+		// Detach volumes that were removed from the configuration.
+		for _, v := range diff(newIDs, oldIDs) {
+			req := &govultr.BlockStorageDetach{Live: govultr.BoolToBoolPtr(true)}
+			if err := client.BlockStorage.Detach(ctx, v, req); err != nil {
+				return diag.Errorf("error detaching block storage id %s : %v", v, err)
+			}
+		}
+	}
+
 	return resourceVultrInstanceRead(ctx, d, meta)
 }
 
@@ -593,6 +653,15 @@ func resourceVultrInstanceDelete(ctx context.Context, d *schema.ResourceData, me
 		}
 	}
 
+	// Detach attached block storage so the volumes survive instance deletion.
+	if blockIDs, blockOK := d.GetOk("block_storage_ids"); blockOK {
+		for _, v := range blockIDs.([]interface{}) {
+			req := &govultr.BlockStorageDetach{Live: govultr.BoolToBoolPtr(true)}
+			if err := client.BlockStorage.Detach(ctx, v.(string), req); err != nil {
+				return diag.Errorf("error detaching block storage id '%s' prior to deleting instance '%s' : %v", v.(string), d.Id(), err)
+			}
+		}
+	}
+
 	if err := client.Instance.Delete(ctx, d.Id()); err != nil {
 		return diag.Errorf("error destroying instance %s : %v", d.Id(), err)
 	}