/*
Use this data source to query detailed information of CKafka instances.

Example Usage

```hcl
data "tencentcloud_ckafka_instances" "foo" {
  instance_ids = ["ckafka-vv7wpvae"]
}
```
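
The `filters` block can narrow the query further. A minimal sketch, using a placeholder VPC ID:

```hcl
data "tencentcloud_ckafka_instances" "filtered" {
  filters {
    name   = "VpcId"
    values = ["vpc-xxxxxxxx"]
  }
  limit = 20
}
```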
*/
package tencentcloud

import (
	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
	ckafka "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/ckafka/v20190819"
	"github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper"
)

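// dataSourceTencentCloudCkafkaInstances defines the tencentcloud_ckafka_instances
// data source: its query arguments (instance_ids, search_word, tag_key, status,
// filters, offset and limit) and the computed instance_list attribute.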
func dataSourceTencentCloudCkafkaInstances() *schema.Resource {
	return &schema.Resource{
		Read: dataSourceTencentCloudCkafkaInstancesRead,

		Schema: map[string]*schema.Schema{
			"instance_ids": {
				Type:        schema.TypeList,
				Optional:    true,
				Elem:        &schema.Schema{Type: schema.TypeString},
				Description: "Filter by instance ID.",
			},
			"search_word": {
				Type:        schema.TypeString,
				Optional:    true,
				Description: "Filter by instance name. Fuzzy queries are supported.",
			},
			"tag_key": {
				Type:        schema.TypeString,
				Optional:    true,
				Description: "Filter by tag key value.",
			},
			"status": {
				Type:        schema.TypeList,
				Optional:    true,
				Elem:        &schema.Schema{Type: schema.TypeInt},
				Description: "(Filter Criteria) The status of the instance. `0`: Creating, `1`: Running, `2`: Deleting. If not set, all instances are returned by default.",
			},
			"filters": {
				Type:     schema.TypeList,
				Optional: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"name": {
							Type:        schema.TypeString,
							Required:    true,
							Description: "The field to be filtered.",
						},
						"values": {
							Type: schema.TypeList,
							Elem: &schema.Schema{
								Type: schema.TypeString,
							},
							Required:    true,
							Description: "The filtered values of the field.",
						},
					},
				},
				Description: "Filter. `filter.name` supports `Ip`, `VpcId`, `SubNetId`, `InstanceType` and `InstanceId`; `filter.values` can pass up to 10 values.",
			},
			"offset": {
				Type:        schema.TypeInt,
				Optional:    true,
				Default:     0,
				Description: "The page start offset, default is `0`.",
			},
			"limit": {
				Type:        schema.TypeInt,
				Optional:    true,
				Default:     10,
				Description: "The number of results returned per page, default is `10`.",
			},
			"result_output_file": {
				Type:        schema.TypeString,
				Optional:    true,
				Description: "Used to save results.",
			},
			"instance_list": {
				Type:        schema.TypeList,
				Computed:    true,
				Description: "A list of CKafka instances. Each element contains the following attributes:",
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"instance_id": {
							Type:        schema.TypeString,
							Computed:    true,
							Description: "The instance ID.",
						},
						"instance_name": {
							Type:        schema.TypeString,
							Computed:    true,
							Description: "The instance name.",
						},
						"vip": {
							Type:        schema.TypeString,
							Computed:    true,
							Description: "Virtual IP.",
						},
						"vport": {
							Type:        schema.TypeString,
							Computed:    true,
							Description: "Virtual port.",
						},
						"vip_list": {
							Type:     schema.TypeList,
							Computed: true,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"vip": {
										Type:        schema.TypeString,
										Computed:    true,
										Description: "Virtual IP.",
									},
									"vport": {
										Type:        schema.TypeString,
										Computed:    true,
										Description: "Virtual port.",
									},
								},
							},
							Description: "Virtual IP entities.",
						},
						"status": {
							Type:        schema.TypeInt,
							Computed:    true,
							Description: "The status of the instance. `0`: Creating, `1`: Running, `2`: Deleting, `5`: Isolated, `-1`: Creation failed.",
						},
						"bandwidth": {
							Type:        schema.TypeInt,
							Computed:    true,
							Description: "Instance bandwidth, in Mbps.",
						},
						"disk_size": {
							Type:        schema.TypeInt,
							Computed:    true,
							Description: "The storage size of the instance, in GB.",
						},
						"zone_id": {
							Type:        schema.TypeInt,
							Computed:    true,
							Description: "Availability zone ID.",
						},
						"vpc_id": {
							Type:        schema.TypeString,
							Computed:    true,
							Description: "VPC ID. An empty value indicates the classic network.",
						},
						"subnet_id": {
							Type:        schema.TypeString,
							Computed:    true,
							Description: "Subnet ID.",
						},
						"renew_flag": {
							Type:        schema.TypeInt,
							Computed:    true,
							Description: "Whether the instance is renewed automatically. Int enumeration value: `1` means auto-renewal, `2` means no auto-renewal.",
						},
						"healthy": {
							Type:        schema.TypeInt,
							Computed:    true,
							Description: "Instance health status. `1`: healthy, `2`: alarmed, `3`: abnormal.",
						},
						"healthy_message": {
							Type:        schema.TypeString,
							Computed:    true,
							Description: "Instance status information.",
						},
						"create_time": {
							Type:        schema.TypeInt,
							Computed:    true,
							Description: "The time when the instance was created.",
						},
						"expire_time": {
							Type:        schema.TypeInt,
							Computed:    true,
							Description: "The instance expiration time.",
						},
						"is_internal": {
							Type:        schema.TypeInt,
							Computed:    true,
							Description: "Whether it is an internal customer. A value of `1` indicates an internal customer.",
						},
						"topic_num": {
							Type:        schema.TypeInt,
							Computed:    true,
							Description: "The number of topics.",
						},
						"tags": {
							Type:     schema.TypeList,
							Computed: true,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"tag_key": {
										Type:        schema.TypeString,
										Computed:    true,
										Description: "Tag key.",
									},
									"tag_value": {
										Type:        schema.TypeString,
										Computed:    true,
										Description: "Tag value.",
									},
								},
							},
							Description: "Tag information.",
						},
						"version": {
							Type:        schema.TypeString,
							Computed:    true,
							Description: "Kafka version information. Note: This field may return null, indicating that a valid value could not be retrieved.",
						},
						"zone_ids": {
							Type:        schema.TypeList,
							Computed:    true,
							Elem:        &schema.Schema{Type: schema.TypeInt},
							Description: "The IDs of the availability zones the instance spans. Note: This field may return null, indicating that a valid value could not be retrieved.",
						},
						"cvm": {
							Type:        schema.TypeInt,
							Computed:    true,
							Description: "CKafka sale type. Note: This field may return null, indicating that a valid value could not be retrieved.",
						},
						"instance_type": {
							Type:        schema.TypeString,
							Computed:    true,
							Description: "CKafka instance type. Note: This field may return null, indicating that a valid value could not be retrieved.",
						},
						"disk_type": {
							Type:        schema.TypeString,
							Computed:    true,
							Description: "Disk type. Note: This field may return null, indicating that a valid value could not be retrieved.",
						},
						"max_topic_number": {
							Type:        schema.TypeInt,
							Computed:    true,
							Description: "The maximum number of topics for the current specification. Note: This field may return null, indicating that a valid value could not be retrieved.",
						},
						"max_partition_number": {
							Type:        schema.TypeInt,
							Computed:    true,
							Description: "The maximum number of partitions for the current specification. Note: This field may return null, indicating that a valid value could not be retrieved.",
						},
						"rebalance_time": {
							Type:        schema.TypeString,
							Computed:    true,
							Description: "The scheduled upgrade configuration time. Note: This field may return null, indicating that a valid value could not be retrieved.",
						},
						"partition_number": {
							Type:        schema.TypeInt,
							Computed:    true,
							Description: "The current number of partitions. Note: This field may return null, indicating that a valid value could not be retrieved.",
						},
						"public_network_charge_type": {
							Type:        schema.TypeString,
							Computed:    true,
							Description: "The type of Internet bandwidth. Note: This field may return null, indicating that a valid value could not be retrieved.",
						},
						"public_network": {
							Type:        schema.TypeInt,
							Computed:    true,
							Description: "The Internet bandwidth value. Note: This field may return null, indicating that a valid value could not be retrieved.",
						},
					},
				},
			},
		},
	}
}
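// dataSourceTencentCloudCkafkaInstancesRead builds a DescribeInstancesDetail
// request from the configured arguments, flattens every returned
// InstanceDetail into the instance_list attribute, and optionally writes the
// result to result_output_file.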
func dataSourceTencentCloudCkafkaInstancesRead(d *schema.ResourceData, meta interface{}) error {
	defer logElapsed("data_source.tencentcloud_ckafka_instances.read")()

	ckafkaService := CkafkaService{
		client: meta.(*TencentCloudClient).apiV3Conn,
	}

	// Build the DescribeInstancesDetail request from the configured arguments.
	request := ckafka.NewDescribeInstancesDetailRequest()
	if v, ok := d.GetOk("instance_ids"); ok {
		request.InstanceIdList = helper.InterfacesStringsPoint(v.([]interface{}))
	}
	if v, ok := d.GetOk("search_word"); ok {
		request.SearchWord = helper.String(v.(string))
	}
	if v, ok := d.GetOk("tag_key"); ok {
		request.TagKey = helper.String(v.(string))
	}
	if v, ok := d.GetOk("status"); ok {
		request.Status = helper.InterfacesIntInt64Point(v.([]interface{}))
	}
	if v, ok := d.GetOk("filters"); ok {
		filterParams := v.([]interface{})
		filters := make([]*ckafka.Filter, 0, len(filterParams))
		for _, filterParam := range filterParams {
			filterParamMap := filterParam.(map[string]interface{})
			filters = append(filters, &ckafka.Filter{
				Name:   helper.String(filterParamMap["name"].(string)),
				Values: helper.InterfacesStringsPoint(filterParamMap["values"].([]interface{})),
			})
		}
		request.Filters = filters
	}
	if v, ok := d.GetOk("offset"); ok {
		request.Offset = helper.IntInt64(v.(int))
	}
	if v, ok := d.GetOk("limit"); ok {
		request.Limit = helper.IntInt64(v.(int))
	}

	response, err := ckafkaService.client.UseCkafkaClient().DescribeInstancesDetail(request)
	if err != nil {
		return err
	}
	var kafkaInstanceDetails []*ckafka.InstanceDetail
	if response.Response.Result != nil {
		kafkaInstanceDetails = response.Response.Result.InstanceList
	}

	// Flatten each returned instance detail into the instance_list schema.
	result := make([]map[string]interface{}, 0, len(kafkaInstanceDetails))
	ids := make([]string, 0, len(kafkaInstanceDetails))
	for _, kafkaInstanceDetail := range kafkaInstanceDetails {
		kafkaInstanceDetailMap := make(map[string]interface{})
		ids = append(ids, *kafkaInstanceDetail.InstanceId)
		kafkaInstanceDetailMap["instance_id"] = kafkaInstanceDetail.InstanceId
		kafkaInstanceDetailMap["instance_name"] = kafkaInstanceDetail.InstanceName
		kafkaInstanceDetailMap["vip"] = kafkaInstanceDetail.Vip
		kafkaInstanceDetailMap["vport"] = kafkaInstanceDetail.Vport
		kafkaInstanceDetailMap["status"] = kafkaInstanceDetail.Status
		kafkaInstanceDetailMap["bandwidth"] = kafkaInstanceDetail.Bandwidth
		kafkaInstanceDetailMap["disk_size"] = kafkaInstanceDetail.DiskSize
		kafkaInstanceDetailMap["zone_id"] = kafkaInstanceDetail.ZoneId
		kafkaInstanceDetailMap["vpc_id"] = kafkaInstanceDetail.VpcId
		kafkaInstanceDetailMap["subnet_id"] = kafkaInstanceDetail.SubnetId
		kafkaInstanceDetailMap["renew_flag"] = kafkaInstanceDetail.RenewFlag
		kafkaInstanceDetailMap["healthy"] = kafkaInstanceDetail.Healthy
		kafkaInstanceDetailMap["healthy_message"] = kafkaInstanceDetail.HealthyMessage
		kafkaInstanceDetailMap["create_time"] = kafkaInstanceDetail.CreateTime
		kafkaInstanceDetailMap["expire_time"] = kafkaInstanceDetail.ExpireTime
		kafkaInstanceDetailMap["is_internal"] = kafkaInstanceDetail.IsInternal
		kafkaInstanceDetailMap["topic_num"] = kafkaInstanceDetail.TopicNum
		kafkaInstanceDetailMap["version"] = kafkaInstanceDetail.Version
		kafkaInstanceDetailMap["cvm"] = kafkaInstanceDetail.Cvm
		kafkaInstanceDetailMap["instance_type"] = kafkaInstanceDetail.InstanceType
		kafkaInstanceDetailMap["disk_type"] = kafkaInstanceDetail.DiskType
		kafkaInstanceDetailMap["max_topic_number"] = kafkaInstanceDetail.MaxTopicNumber
		kafkaInstanceDetailMap["max_partition_number"] = kafkaInstanceDetail.MaxPartitionNumber
		kafkaInstanceDetailMap["rebalance_time"] = kafkaInstanceDetail.RebalanceTime
		kafkaInstanceDetailMap["partition_number"] = kafkaInstanceDetail.PartitionNumber
		kafkaInstanceDetailMap["public_network_charge_type"] = kafkaInstanceDetail.PublicNetworkChargeType
		kafkaInstanceDetailMap["public_network"] = kafkaInstanceDetail.PublicNetwork

		vipList := make([]map[string]string, 0, len(kafkaInstanceDetail.VipList))
		for _, vip := range kafkaInstanceDetail.VipList {
			vipList = append(vipList, map[string]string{
				"vip":   *vip.Vip,
				"vport": *vip.Vport,
			})
		}
		kafkaInstanceDetailMap["vip_list"] = vipList

		tags := make([]map[string]string, 0, len(kafkaInstanceDetail.Tags))
		for _, tag := range kafkaInstanceDetail.Tags {
			tags = append(tags, map[string]string{
				"tag_key":   *tag.TagKey,
				"tag_value": *tag.TagValue,
			})
		}
		kafkaInstanceDetailMap["tags"] = tags

		zoneIds := make([]int64, 0, len(kafkaInstanceDetail.ZoneIds))
		for _, zoneId := range kafkaInstanceDetail.ZoneIds {
			zoneIds = append(zoneIds, *zoneId)
		}
		kafkaInstanceDetailMap["zone_ids"] = zoneIds

		result = append(result, kafkaInstanceDetailMap)
	}
	d.SetId(helper.DataResourceIdsHash(ids))
	if e := d.Set("instance_list", result); e != nil {
		return e
	}

	output, ok := d.GetOk("result_output_file")
	if ok && output.(string) != "" {
		if e := writeToFile(output.(string), result); e != nil {
			return e
		}
	}
	return nil
}