Commit: Implement two resources to allow maintenance of custom upgrade host groups.

Signed-off-by: Kobi Samoray <kobi.samoray@broadcom.com>
Showing 5 changed files with 716 additions and 0 deletions.
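The diff below reproduces two of the changed files, containing the two new resource implementations; the remaining changed files are not shown in this excerpt. As a minimal, hypothetical sketch only (the resource type names below are assumed for illustration and are not taken from this commit), wiring the new resource functions into the provider's resource map would look roughly like this:

package nsxt

import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

// upgradeGroupResources is an illustrative helper only: it shows how the two
// resource functions defined in this commit could be exposed to Terraform.
// The map keys are hypothetical resource type names, not confirmed by the diff.
func upgradeGroupResources() map[string]*schema.Resource {
	return map[string]*schema.Resource{
		"nsxt_upgrade_host_group":            resourceNsxtHostUpgradeGroup(),
		"nsxt_upgrade_host_group_assignment": resourceNsxtHostUpgradeGroupAssignment(),
	}
}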
@@ -0,0 +1,188 @@
/* Copyright © 2024 Broadcom, Inc. All Rights Reserved.
   SPDX-License-Identifier: MPL-2.0 */

package nsxt

import (
	"fmt"
	"log"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/vmware/vsphere-automation-sdk-go/services/nsxt-mp/nsx/model"
	"github.com/vmware/vsphere-automation-sdk-go/services/nsxt-mp/nsx/upgrade"
)

func resourceNsxtHostUpgradeGroup() *schema.Resource {
	return &schema.Resource{
		Create: resourceNsxtHostUpgradeGroupCreate,
		Read:   resourceNsxtHostUpgradeGroupRead,
		Update: resourceNsxtHostUpgradeGroupUpdate,
		Delete: resourceNsxtHostUpgradeGroupDelete,
		Importer: &schema.ResourceImporter{
			State: schema.ImportStatePassthrough,
		},

		Schema: map[string]*schema.Schema{
			"revision":     getRevisionSchema(),
			"description":  getDescriptionSchema(),
			"display_name": getDisplayNameSchema(),
			"tag":          getTagsSchema(),
			"enabled": {
				Type:        schema.TypeBool,
				Description: "Flag to indicate whether upgrade of this group is enabled or not",
				Optional:    true,
				Default:     true,
			},
			"extended_configuration": {
				Type:        schema.TypeList,
				Optional:    true,
				Description: "Advanced configuration",
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"key": {
							Type:     schema.TypeString,
							Required: true,
						},
						"value": {
							Type:     schema.TypeString,
							Required: true,
						},
					},
				},
			},
			"parallel": {
				Type:        schema.TypeBool,
				Description: "Upgrade method to specify whether the upgrade is to be performed in parallel or serially",
				Optional:    true,
				Default:     true,
			},
			"pause_after_each_upgrade_unit": {
				Type:        schema.TypeBool,
				Description: "Flag to indicate whether upgrade should be paused after upgrade of each upgrade-unit",
				Optional:    true,
				Default:     false,
			},
		},
	}
}

func getUnitGroupFromSchema(d *schema.ResourceData) model.UpgradeUnitGroup {
	description := d.Get("description").(string)
	displayName := d.Get("display_name").(string)
	tags := getMPTagsFromSchema(d)
	enabled := d.Get("enabled").(bool)
	ec := d.Get("extended_configuration")
	var extendedConfiguration []model.KeyValuePair
	if ec != nil {
		for _, ec := range ec.([]interface{}) {
			ecMap := ec.(map[string]interface{})
			key := ecMap["key"].(string)
			val := ecMap["value"].(string)
			extendedConfiguration = append(extendedConfiguration, model.KeyValuePair{Key: &key, Value: &val})
		}
	}
	parallel := d.Get("parallel").(bool)
	pauseAfterEachUpgradeUnit := d.Get("pause_after_each_upgrade_unit").(bool)
	unitType := "HOST"

	return model.UpgradeUnitGroup{
		Description:               &description,
		DisplayName:               &displayName,
		Tags:                      tags,
		Enabled:                   &enabled,
		ExtendedConfiguration:     extendedConfiguration,
		Parallel:                  &parallel,
		PauseAfterEachUpgradeUnit: &pauseAfterEachUpgradeUnit,
		Type_:                     &unitType,
	}
}

func addHostUpgradeUnitToPredefinedGroup(id string) error {
	// Get host to find out its compute cluster
	return nil
}

func resourceNsxtHostUpgradeGroupCreate(d *schema.ResourceData, m interface{}) error {
	connector := getPolicyConnector(m)
	client := upgrade.NewUpgradeUnitGroupsClient(connector)

	obj := getUnitGroupFromSchema(d)
	// Capture the display name before calling Create: on failure the returned
	// object may be empty, so its DisplayName pointer cannot be dereferenced safely.
	displayName := *obj.DisplayName
	log.Printf("[INFO] Creating Host Upgrade Group %s", displayName)
	createdObj, err := client.Create(obj)
	if err != nil {
		return handleCreateError("Host Upgrade Group", displayName, err)
	}

	d.SetId(*createdObj.Id)

	return resourceNsxtHostUpgradeGroupRead(d, m)
}

func resourceNsxtHostUpgradeGroupRead(d *schema.ResourceData, m interface{}) error {
	id := d.Id()
	if id == "" {
		return fmt.Errorf("error obtaining Host Upgrade Group ID")
	}

	connector := getPolicyConnector(m)
	client := upgrade.NewUpgradeUnitGroupsClient(connector)
	obj, err := client.Get(id, nil)
	if err != nil {
		return handleReadError(d, "Host Upgrade Group", id, err)
	}

	d.Set("display_name", obj.DisplayName)
	d.Set("description", obj.Description)
	setMPTagsInSchema(d, obj.Tags)
	d.Set("revision", obj.Revision)
	d.Set("enabled", obj.Enabled)
	var extendedConfiguration []interface{}
	for _, ec := range obj.ExtendedConfiguration {
		// Initialize the map before assigning into it; assigning to a nil map panics.
		ecMap := make(map[string]interface{})
		ecMap["key"] = ec.Key
		ecMap["value"] = ec.Value
		extendedConfiguration = append(extendedConfiguration, ecMap)
	}
	d.Set("extended_configuration", extendedConfiguration)
	d.Set("parallel", obj.Parallel)
	d.Set("pause_after_each_upgrade_unit", obj.PauseAfterEachUpgradeUnit)

	return nil
}

func resourceNsxtHostUpgradeGroupUpdate(d *schema.ResourceData, m interface{}) error {
	id := d.Id()
	if id == "" {
		return fmt.Errorf("error obtaining Host Upgrade Group ID")
	}

	connector := getPolicyConnector(m)
	client := upgrade.NewUpgradeUnitGroupsClient(connector)

	obj := getUnitGroupFromSchema(d)
	revision := int64(d.Get("revision").(int))
	obj.Revision = &revision
	_, err := client.Update(id, obj)
	if err != nil {
		return handleUpdateError("Host Upgrade Group", id, err)
	}

	return resourceNsxtHostUpgradeGroupRead(d, m)
}

func resourceNsxtHostUpgradeGroupDelete(d *schema.ResourceData, m interface{}) error {
	id := d.Id()
	if id == "" {
		return fmt.Errorf("error obtaining Host Upgrade Group ID")
	}

	connector := getPolicyConnector(m)
	client := upgrade.NewUpgradeUnitGroupsClient(connector)

	err := client.Delete(id)
	if err != nil {
		return handleDeleteError("Host Upgrade Group", id, err)
	}

	return nil
}
@@ -0,0 +1,197 @@
/* Copyright © 2024 Broadcom, Inc. All Rights Reserved.
   SPDX-License-Identifier: MPL-2.0 */

package nsxt

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/vmware/vsphere-automation-sdk-go/lib/vapi/std/errors"
	"github.com/vmware/vsphere-automation-sdk-go/runtime/bindings"
	"github.com/vmware/vsphere-automation-sdk-go/runtime/protocol/client"
	"github.com/vmware/vsphere-automation-sdk-go/services/nsxt-mp/nsx"
	"github.com/vmware/vsphere-automation-sdk-go/services/nsxt-mp/nsx/model"
	"github.com/vmware/vsphere-automation-sdk-go/services/nsxt-mp/nsx/upgrade"
	"golang.org/x/exp/slices"
)

const hostUpgradeUnitDefaultGroup = "Group 1 for ESXI"

func resourceNsxtHostUpgradeGroupAssignment() *schema.Resource {
	return &schema.Resource{
		Create: resourceNsxtHostUpgradeGroupAssignmentCreate,
		Read:   resourceNsxtHostUpgradeGroupAssignmentRead,
		Update: resourceNsxtHostUpgradeGroupAssignmentUpdate,
		Delete: resourceNsxtHostUpgradeGroupAssignmentDelete,
		Importer: &schema.ResourceImporter{
			State: resourceNsxtHostUpgradeGroupAssignmentImport,
		},

		Schema: map[string]*schema.Schema{
			"host_id": {
				Type:        schema.TypeString,
				Description: "The ID of the ESXi host that is assigned to the upgrade group",
				Required:    true,
				ForceNew:    true,
			},
			"host_upgrade_group_id": {
				Type:        schema.TypeString,
				Description: "The ID of the host upgrade group that the host is assigned to",
				Required:    true,
			},
		},
	}
}

func getUnitIDsFromUnits(units []model.UpgradeUnit) []string {
	var unitIDs []string

	for _, unit := range units {
		unitIDs = append(unitIDs, *unit.Id)
	}
	return unitIDs
}

func addHostToGroup(connector client.Connector, groupID, hostID string, isCreate bool) error {
	client := upgrade.NewUpgradeUnitGroupsClient(connector)

	group, err := client.Get(groupID, nil)
	if err != nil {
		return err
	}

	hostIDs := getUnitIDsFromUnits(group.UpgradeUnits)
	if slices.Contains(hostIDs, hostID) {
		if isCreate {
			return fmt.Errorf("host %s already exists in group %s", hostID, groupID)
		}
		// On update or delete the host may already be a member of the target group;
		// in that case there is nothing to do.
		return nil
	}
	group.UpgradeUnits = append(group.UpgradeUnits, model.UpgradeUnit{Id: &hostID})
	_, err = client.Update(groupID, group)
	if err != nil {
		return err
	}
	return nil
}

func resourceNsxtHostUpgradeGroupAssignmentCreate(d *schema.ResourceData, m interface{}) error {
	hostID := d.Get("host_id").(string)
	groupID := d.Get("host_upgrade_group_id").(string)

	connector := getPolicyConnector(m)
	err := addHostToGroup(connector, groupID, hostID, true)
	if err != nil {
		return handleCreateError("Host Upgrade Group Assignment", hostID, err)
	}

	d.SetId(hostID)

	return resourceNsxtHostUpgradeGroupAssignmentRead(d, m)
}

func resourceNsxtHostUpgradeGroupAssignmentRead(d *schema.ResourceData, m interface{}) error {
	hostID := d.Id()
	groupID := d.Get("host_upgrade_group_id").(string)

	connector := getPolicyConnector(m)
	client := upgrade.NewUpgradeUnitGroupsClient(connector)

	group, err := client.Get(groupID, nil)
	if err != nil {
		return handleReadError(d, "Host Upgrade Group Assignment", groupID, err)
	}

	hostIDs := getUnitIDsFromUnits(group.UpgradeUnits)
	if !slices.Contains(hostIDs, hostID) {
		return errors.NotFound{}
	}

	return nil
}

func resourceNsxtHostUpgradeGroupAssignmentUpdate(d *schema.ResourceData, m interface{}) error {
	hostID := d.Id()
	groupID := d.Get("host_upgrade_group_id").(string)

	connector := getPolicyConnector(m)
	err := addHostToGroup(connector, groupID, hostID, false)
	if err != nil {
		return handleUpdateError("Host Upgrade Group Assignment", hostID, err)
	}

	return resourceNsxtHostUpgradeGroupAssignmentRead(d, m)
}

func getHostDefaultUpgradeGroup(connector client.Connector, hostID string) (string, error) {
	hostClient := nsx.NewTransportNodesClient(connector)
	host, err := hostClient.Get(hostID)
	if err != nil {
		return "", err
	}
	converter := bindings.NewTypeConverter()
	base, errs := converter.ConvertToGolang(host.NodeDeploymentInfo, model.HostNodeBindingType())
	if errs != nil {
		return "", errs[0]
	}
	node := base.(model.HostNode)

	if node.ComputeCollectionId != nil {
		return *node.ComputeCollectionId, nil
	}

	// This host is not a part of a compute cluster:
	// it should be assigned to the 'Group 1 for ESXI' group (this value is hardcoded in NSX)
	groupClient := upgrade.NewUpgradeUnitGroupsClient(connector)
	componentType := "HOST"
	hostGroups, err := groupClient.List(&componentType, nil, nil, nil, nil, nil, nil, nil)
	if err != nil {
		return "", err
	}
	if hostGroups.Results != nil {
		for _, group := range hostGroups.Results {
			if group.DisplayName != nil && *group.DisplayName == hostUpgradeUnitDefaultGroup {
				return *group.Id, nil
			}
		}
	}

	return "", errors.NotFound{}
}

func resourceNsxtHostUpgradeGroupAssignmentDelete(d *schema.ResourceData, m interface{}) error {
	// Removing an assignment is tricky, as a host always has to belong to some upgrade group.
	// Adding the host back to its original (default) group removes it from its custom group.
	hostID := d.Id()

	connector := getPolicyConnector(m)
	groupID, err := getHostDefaultUpgradeGroup(connector, hostID)
	if isNotFoundError(err) {
		return fmt.Errorf("failed to remove host %s from its upgrade group: default group not found", hostID)
	} else if err != nil {
		return handleDeleteError("Host Upgrade Group Assignment", hostID, err)
	}
	return addHostToGroup(connector, groupID, hostID, false)
}

func resourceNsxtHostUpgradeGroupAssignmentImport(d *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) {
	hostID := d.Id()
	connector := getPolicyConnector(m)
	groupClient := upgrade.NewUpgradeUnitGroupsClient(connector)
	componentType := "HOST"
	hostGroups, err := groupClient.List(&componentType, nil, nil, nil, nil, nil, nil, nil)
	if err != nil {
		return nil, err
	}
	if hostGroups.Results != nil {
		for _, group := range hostGroups.Results {
			hostIDs := getUnitIDsFromUnits(group.UpgradeUnits)
			if slices.Contains(hostIDs, hostID) {
				d.Set("host_id", hostID)
				d.Set("host_upgrade_group_id", group.Id)
				d.SetId(hostID)
				return []*schema.ResourceData{d}, nil
			}
		}
	}

	return nil, errors.NotFound{}
}
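
The import handler above discovers the owning group by listing all HOST upgrade unit groups and scanning their members. For reference, a small diagnostic helper built on the same List call and the getUnitIDsFromUnits helper could look like the sketch below; it is a hypothetical illustration only, assumes this file's package and imports, and is not part of the change:

// printHostUpgradeGroups is an illustrative sketch: it lists all HOST upgrade
// unit groups and prints each group's member upgrade-unit IDs.
func printHostUpgradeGroups(connector client.Connector) error {
	groupClient := upgrade.NewUpgradeUnitGroupsClient(connector)
	componentType := "HOST"
	hostGroups, err := groupClient.List(&componentType, nil, nil, nil, nil, nil, nil, nil)
	if err != nil {
		return err
	}
	for _, group := range hostGroups.Results {
		name := ""
		if group.DisplayName != nil {
			name = *group.DisplayName
		}
		fmt.Printf("%s: %v\n", name, getUnitIDsFromUnits(group.UpgradeUnits))
	}
	return nil
}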