
VMware vSphere Input Plugin

This plugin collects metrics from vCenter vSphere servers, including cluster, host, resource pool, virtual machine, datastore, and vSAN information.

This plugin requires vSphere v7.0+.

Introduced in: Telegraf v1.8.0  Tags: containers  OS support: all

Global configuration options

This plugin supports additional global and plugin configuration settings. These settings are used to modify metrics, tags, and fields, create aliases, and configure plugin ordering, among other tasks. See CONFIGURATION.md for more details.
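A hedged sketch of how such settings look in practice (the option values below are assumptions; CONFIGURATION.md is the authoritative reference):

[[inputs.vsphere]]
  ## Assumed values, for illustration only
  alias = "vsphere-prod"   ## identifies this plugin instance in logs
  interval = "60s"         ## per-plugin collection interval
  name_prefix = "dc1_"     ## optional prefix added to measurement names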

Secret-store support

This plugin supports secrets from secret-stores for the username and password options. See the secret-store documentation for more details on how to use them.
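A minimal sketch, assuming a secret store with the id "mystore" (here the os secret-store plugin) holds the hypothetical keys vsphere_username and vsphere_password:

[[secretstores.os]]
  id = "mystore"

[[inputs.vsphere]]
  vcenters = [ "https://vcenter.local/sdk" ]
  username = "@{mystore:vsphere_username}"
  password = "@{mystore:vsphere_password}"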

Configuration

# Read metrics from one or many vCenters
[[inputs.vsphere]]
  ## List of vCenter URLs to be monitored. These three lines must be uncommented
  ## and edited for the plugin to work.
  vcenters = [ "https://vcenter.local/sdk" ]
  username = "user@corp.local"
  password = "secret"

  ## VMs
  ## Typical VM metrics (if omitted or empty, all metrics are collected)
  # vm_include = [ "/*/vm/**"] # Inventory path to VMs to collect (by default all are collected)
  # vm_exclude = [] # Inventory paths to exclude
  vm_metric_include = [
    "cpu.demand.average",
    "cpu.idle.summation",
    "cpu.latency.average",
    "cpu.readiness.average",
    "cpu.ready.summation",
    "cpu.run.summation",
    "cpu.usagemhz.average",
    "cpu.used.summation",
    "cpu.wait.summation",
    "mem.active.average",
    "mem.granted.average",
    "mem.latency.average",
    "mem.swapin.average",
    "mem.swapinRate.average",
    "mem.swapout.average",
    "mem.swapoutRate.average",
    "mem.usage.average",
    "mem.vmmemctl.average",
    "net.bytesRx.average",
    "net.bytesTx.average",
    "net.droppedRx.summation",
    "net.droppedTx.summation",
    "net.usage.average",
    "power.power.average",
    "virtualDisk.numberReadAveraged.average",
    "virtualDisk.numberWriteAveraged.average",
    "virtualDisk.read.average",
    "virtualDisk.readOIO.latest",
    "virtualDisk.throughput.usage.average",
    "virtualDisk.totalReadLatency.average",
    "virtualDisk.totalWriteLatency.average",
    "virtualDisk.write.average",
    "virtualDisk.writeOIO.latest",
    "sys.uptime.latest",
  ]
  # vm_metric_exclude = [] ## Nothing is excluded by default
  # vm_instances = true ## true by default

  ## Hosts
  ## Typical host metrics (if omitted or empty, all metrics are collected)
  # host_include = [ "/*/host/**"] # Inventory path to hosts to collect (by default all are collected)
  # host_exclude = [] # Inventory paths to exclude
  host_metric_include = [
    "cpu.coreUtilization.average",
    "cpu.costop.summation",
    "cpu.demand.average",
    "cpu.idle.summation",
    "cpu.latency.average",
    "cpu.readiness.average",
    "cpu.ready.summation",
    "cpu.swapwait.summation",
    "cpu.usage.average",
    "cpu.usagemhz.average",
    "cpu.used.summation",
    "cpu.utilization.average",
    "cpu.wait.summation",
    "disk.deviceReadLatency.average",
    "disk.deviceWriteLatency.average",
    "disk.kernelReadLatency.average",
    "disk.kernelWriteLatency.average",
    "disk.numberReadAveraged.average",
    "disk.numberWriteAveraged.average",
    "disk.read.average",
    "disk.totalReadLatency.average",
    "disk.totalWriteLatency.average",
    "disk.write.average",
    "mem.active.average",
    "mem.latency.average",
    "mem.state.latest",
    "mem.swapin.average",
    "mem.swapinRate.average",
    "mem.swapout.average",
    "mem.swapoutRate.average",
    "mem.totalCapacity.average",
    "mem.usage.average",
    "mem.vmmemctl.average",
    "net.bytesRx.average",
    "net.bytesTx.average",
    "net.droppedRx.summation",
    "net.droppedTx.summation",
    "net.errorsRx.summation",
    "net.errorsTx.summation",
    "net.usage.average",
    "power.power.average",
    "storageAdapter.numberReadAveraged.average",
    "storageAdapter.numberWriteAveraged.average",
    "storageAdapter.read.average",
    "storageAdapter.write.average",
    "sys.uptime.latest",
  ]
  ## Collect IP addresses? Valid values are "ipv4" and "ipv6"
  # ip_addresses = ["ipv6", "ipv4" ]

  # host_metric_exclude = [] ## Nothing excluded by default
  # host_instances = true ## true by default


  ## Clusters
  # cluster_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected)
  # cluster_exclude = [] # Inventory paths to exclude
  # cluster_metric_include = [] ## if omitted or empty, all metrics are collected
  # cluster_metric_exclude = [] ## Nothing excluded by default
  # cluster_instances = false ## false by default

  ## Resource Pools
  # resource_pool_include = [ "/*/host/**"] # Inventory path to resource pools to collect (by default all are collected)
  # resource_pool_exclude = [] # Inventory paths to exclude
  # resource_pool_metric_include = [] ## if omitted or empty, all metrics are collected
  # resource_pool_metric_exclude = [] ## Nothing excluded by default
  # resource_pool_instances = false ## false by default

  ## Datastores
  # datastore_include = [ "/*/datastore/**"] # Inventory path to datastores to collect (by default all are collected)
  # datastore_exclude = [] # Inventory paths to exclude
  # datastore_metric_include = [] ## if omitted or empty, all metrics are collected
  # datastore_metric_exclude = [] ## Nothing excluded by default
  # datastore_instances = false ## false by default

  ## Datacenters
  # datacenter_include = [ "/*/host/**"] # Inventory path to datacenters to collect (by default all are collected)
  # datacenter_exclude = [] # Inventory paths to exclude
  datacenter_metric_include = [] ## if omitted or empty, all metrics are collected
  datacenter_metric_exclude = [ "*" ] ## Datacenters are not collected by default.
  # datacenter_instances = false ## false by default

  ## VSAN
  # vsan_metric_include = [] ## if omitted or empty, all metrics are collected
  # vsan_metric_exclude = [ "*" ] ## vSAN metrics are not collected by default.
  ## Whether to skip verifying vSAN metrics against the ones from GetSupportedEntityTypes API.
  # vsan_metric_skip_verify = false ## false by default.

  ## Interval for sampling vSAN performance metrics, can be reduced down to
  ## 30 seconds for vSAN 8 U1.
  # vsan_interval = "5m"

  ## Plugin Settings
  ## separator character to use for measurement and field names (default: "_")
  # separator = "_"

  ## number of objects to retrieve per query for realtime resources (vms and hosts)
  ## set to 64 for vCenter 5.5 and 6.0 (default: 256)
  # max_query_objects = 256

  ## number of metrics to retrieve per query for non-realtime resources (clusters and datastores)
  ## set to 64 for vCenter 5.5 and 6.0 (default: 256)
  # max_query_metrics = 256

  ## number of go routines to use for collection and discovery of objects and metrics
  # collect_concurrency = 1
  # discover_concurrency = 1

  ## the interval before (re)discovering objects subject to metrics collection (default: 300s)
  # object_discovery_interval = "300s"

  ## timeout applies to any of the API requests made to vCenter
  # timeout = "60s"

  ## When set to true, all samples are sent as integers. This makes the output
  ## data types backwards compatible with Telegraf 1.9 or lower. Normally all
  ## samples from vCenter, with the exception of percentages, are integer
  ## values, but under some conditions, some averaging takes place internally in
  ## the plugin. Setting this flag to "false" will send values as floats to
  ## preserve the full precision when averaging takes place.
  # use_int_samples = true

  ## Custom attributes from vCenter can be very useful for queries in order to slice the
  ## metrics along different dimension and for forming ad-hoc relationships. They are disabled
  ## by default, since they can add a considerable amount of tags to the resulting metrics. To
  ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include
  ## to select the attributes you want to include.
  # custom_attribute_include = []
  # custom_attribute_exclude = ["*"]

  ## The number of vSphere 5 minute metric collection cycles to look back for non-realtime metrics. In
  ## some versions (6.7, 7.0 and possibly more), certain metrics, such as cluster metrics, may be reported
  ## with a significant delay (>30min). If this happens, try increasing this number. Please note that increasing
  ## it too much may cause performance issues.
  # metric_lookback = 3

  ## Optional SSL Config
  # ssl_ca = "/path/to/cafile"
  # ssl_cert = "/path/to/certfile"
  # ssl_key = "/path/to/keyfile"
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false

  ## The Historical Interval value must match EXACTLY the interval in the daily
  # "Interval Duration" found on the VCenter server under Configure > General > Statistics > Statistic intervals
  # historical_interval = "5m"

  ## Specifies plugin behavior regarding disconnected servers
  ## Available choices :
  ##   - error: telegraf will return an error on startup if one of the servers is unreachable
  ##   - ignore: telegraf will ignore unreachable servers on both startup and gather
  # disconnected_servers_behavior = "error"

  ## HTTP Proxy support
  # use_system_proxy = true
  # http_proxy_url = ""

Note: To disable collection of a specific resource type, simply exclude all of its metrics using XX_metric_exclude. For example, to disable collection of VMs, add this configuration:

vm_metric_exclude = [ "*" ]

Objects and metrics per query

By default, the vCenter configuration sets a limit on the number of entities that can be included in a performance chart query. The default setting for vCenter 6.5 and later is 256. Prior versions of vCenter have this limit set to 64. A vCenter administrator can change this setting. See this VMware KB article for more information.

Any modification should be reflected in this plugin by adjusting the max_query_objects parameter:

  ## number of objects to retrieve per query for realtime resources (VMs and hosts)
  ## set to 64 for vCenter 5.5 and 6.0 (default: 256)
  # max_query_objects = 256

Collection and discovery concurrency

On large vCenter setups it may be prudent to have multiple concurrent Go routines collect performance metrics, in order to avoid potential errors caused by time elapsing during a collection cycle. This value should never be greater than 8, although the default of 1 (no concurrency) should be sufficient for most configurations.

To set the concurrency, modify the collect_concurrency and discover_concurrency parameters:

  ## number of go routines to use for collection and discovery of objects and metrics
  # collect_concurrency = 1
  # discover_concurrency = 1

Inventory paths

Resources to be monitored can be selected using inventory paths. This treats the vSphere inventory as a tree structure similar to a file system. A vSphere inventory has a structure similar to this:

<root>
+-DC0 # Virtual datacenter
   +-datastore # Datastore folder (created by system)
   | +-Datastore1
   +-host # Host folder (created by system)
   | +-Cluster1
   | | +-Host1
   | | | +-VM1
   | | | +-VM2
   | | | +-hadoop1
   | | +-ResourcePool1
   | | | +-VM3
   | | | +-VM4
   | +-Host2 # Dummy cluster created for non-clustered host
   | | +-Host2
   | | | +-VM5
   | | | +-VM6
   +-vm # VM folder (created by system)
   | +-VM1
   | +-VM2
   | +-Folder1
   | | +-hadoop1
   | | +-NestedFolder1
   | | | +-VM3
   | | | +-VM4

Using inventory paths

Using familiar UNIX-style paths, VM2 can, for example, be selected with the path /DC0/vm/VM2.

Often, we want to select a group of resources, such as all the VMs in a folder. We can do that with the path /DC0/vm/Folder1/*.

Another way to select objects is by using partial names, such as /DC0/vm/Folder1/hadoop*, which lists all VMs in Folder1 whose names start with "hadoop".

Finally, since the folder structure can be arbitrarily nested, we need a "recursive wildcard" for traversing multiple folders. We use the "**" symbol for that. If we want to look for VMs with names starting with "hadoop" in any folder, we can use the following path: /DC0/vm/**/hadoop*
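Applied to the plugin configuration, such paths go into the corresponding *_include options. A minimal sketch using the example inventory above:

[[inputs.vsphere]]
  ## Collect only VMs whose names start with "hadoop", in any folder of DC0
  vm_include = [ "/DC0/vm/**/hadoop*" ]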

Multiple paths to VMs

As can be seen from the example tree above, VMs appear both in their own folder under the datacenter and under the hosts they run on. This is useful when you want to select VMs on a specific host. For example, /DC0/host/Cluster1/Host1/hadoop* selects all VMs with names starting with "hadoop" that are running on Host1.

We can extend this to the cluster level: /DC0/host/Cluster1/*/hadoop*. This selects any VM matching "hadoop*" on any host in Cluster1.

Inventory paths and top-level folders

If your datacenter is placed in a folder rather than directly below the inventory root, the default inventory paths will not work. This is intentional, since recursive wildcards can be slow in very large environments.

If your datacenter is located in a folder, you have two options:

  1. Explicitly include the folder in the path. For example, if your datacenter is in a folder named F1, you can reach your hosts with the path /F1/MyDatacenter/host/**.
  2. Use a recursive wildcard to search through a chain of nested folders of arbitrary length. To reach your hosts, you can use the path /**/host/**. Note: this may run slowly in very large environments, since a large number of nodes will be traversed. See the sketch following this list.
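A minimal sketch of both options (the folder name F1 and datacenter name MyDatacenter are taken from the example above):

[[inputs.vsphere]]
  ## Option 1: name the folder explicitly
  # host_include = [ "/F1/MyDatacenter/host/**" ]
  ## Option 2: recursive wildcard; may be slow in very large environments
  host_include = [ "/**/host/**" ]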

Performance considerations

Realtime vs. historical metrics

vCenter keeps two different kinds of metrics, known as realtime and historical metrics.

  • Realtime metrics: Available at a 20-second granularity. These metrics are stored in memory and are very fast and cheap to query. Our tests have shown that a complete set of realtime metrics for 7000 virtual machines can be obtained in less than 20 seconds. Realtime metrics are only available for ESXi host and virtual machine resources. Realtime metrics are only stored for 1 hour in vCenter.
  • Historical metrics: Available at (by default) 5-minute, 30-minute, 2-hour and 24-hour rollup levels. The vSphere Telegraf plugin only uses the most granular rollup, which defaults to 5 minutes but can be changed in vCenter to a different interval duration. These metrics are stored in the vCenter database and are expensive and slow to query. Historical metrics are the only type of metrics available for clusters, datastores, resource pools and datacenters.

This distinction affects how Telegraf collects metrics. A single instance of an input plugin can have only one collection interval, which means the collection interval is typically set based on the most frequently collected metrics. Let's say you set the collection interval to 1 minute. All realtime metrics are then collected every minute. Since historical metrics are only available at a 5-minute interval, the vSphere Telegraf plugin automatically skips four out of five collection cycles for these metrics. This works in many cases. Problems arise when collecting the historical metrics takes longer than the collection interval. This results in log messages similar to this in the Telegraf logs:

2019-01-16T13:41:10Z W! [agent] input "inputs.vsphere" did not complete within its interval

This will disrupt metric collection and can result in missed samples. The best-practice workaround is to specify two instances of the vSphere plugin: one for the realtime metrics with a short collection interval, and one for the historical metrics with a longer collection interval. You can use *_metric_exclude to turn off the resources you don't want to collect metrics for in each instance. For example:

## Realtime instance
[[inputs.vsphere]]
  interval = "60s"
  vcenters = [ "https://someaddress/sdk" ]
  username = "someuser@vsphere.local"
  password = "secret"

  insecure_skip_verify = true

  # Exclude all historical metrics
  datastore_metric_exclude = ["*"]
  cluster_metric_exclude = ["*"]
  datacenter_metric_exclude = ["*"]
  resource_pool_metric_exclude = ["*"]
  vsan_metric_exclude = ["*"]

  collect_concurrency = 5
  discover_concurrency = 5

# Historical instance
[[inputs.vsphere]]

  interval = "300s"

  vcenters = [ "https://someaddress/sdk" ]
  username = "someuser@vsphere.local"
  password = "secret"

  insecure_skip_verify = true
  host_metric_exclude = ["*"] # Exclude realtime metrics
  vm_metric_exclude = ["*"] # Exclude realtime metrics

  max_query_metrics = 256
  collect_concurrency = 3

Configuring the max_query_metrics setting

max_query_metrics determines the maximum number of metrics to attempt to retrieve in one call to vCenter. Generally speaking, a higher number means faster and more efficient queries. However, the number of metrics allowed per query is typically limited by the config.vpxd.stats.maxQueryMetrics setting in vCenter. This value defaults to 64 on vSphere 5.5 and earlier and to 256 on newer versions of vCenter. The vSphere plugin always checks this setting and automatically reduces the number if the limit configured in vCenter is lower than max_query_metrics in the plugin. This results in a log message similar to this:

2019-01-21T03:24:18Z W! [input.vsphere] Configured max_query_metrics is 256, but server limits it to 64. Reducing.

You may ask your vCenter administrator to increase this limit to help boost performance.
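The limit used by the plugin is adjusted through the max_query_metrics parameter, mirroring the max_query_objects snippet above:

  ## number of metrics to retrieve per query for non-realtime resources (clusters and datastores)
  ## set to 64 for vCenter 5.5 and 6.0 (default: 256)
  # max_query_metrics = 256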

Cluster metrics and the max_query_metrics setting

Cluster metrics are handled a bit differently by vCenter. They are aggregated from ESXi and virtual machine metrics and may not be available when you query their most recent values. When this happens, vCenter tries to perform the aggregation on the fly. Unfortunately, all the subqueries needed internally in vCenter to perform this aggregation count towards config.vpxd.stats.maxQueryMetrics. This means that even a small query may result in a log message similar to this:

2018-11-02T13:37:11Z E! Error in plugin [inputs.vsphere]: ServerFaultCode: This operation is restricted by the administrator - 'vpxd.stats.maxQueryMetrics'. Contact your system administrator

There are two ways of addressing this:

  • Ask your vCenter administrator to set config.vpxd.stats.maxQueryMetrics to a number that is higher than the total number of virtual machines managed by the vCenter instance.
  • Exclude the cluster metrics and use either the basicstats aggregator to calculate sums and averages per cluster, or use queries in the visualization tool to obtain the same result; a sketch follows this list.
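A minimal sketch of the second approach, assuming the basicstats aggregator is used to roll up host-level samples (the period, stats, and name pattern chosen here are assumptions; a real setup may also need tag handling to group strictly per cluster):

[[inputs.vsphere]]
  vcenters = [ "https://vcenter.local/sdk" ]
  username = "user@corp.local"
  password = "secret"
  cluster_metric_exclude = [ "*" ]   ## skip the expensive cluster metrics

[[aggregators.basicstats]]
  period = "300s"
  drop_original = false
  stats = [ "mean", "sum" ]
  namepass = [ "vsphere_host_*" ]    ## only aggregate host measurements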

Concurrency settings

The vSphere plugin allows you to specify two concurrency settings:

  • collect_concurrency: The maximum number of concurrent queries for performance metrics allowed per resource type.
  • discover_concurrency: The maximum number of concurrent queries for resource discovery allowed.

While a higher level of concurrency typically has a positive impact on performance, increasing these numbers too much can cause performance issues on the vCenter server. A rule of thumb is to set these parameters to the number of virtual machines divided by 1500, rounded up to the nearest integer.
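For example, for an assumed environment of roughly 4000 virtual machines, 4000 / 1500 ≈ 2.7, which rounds up to 3:

  ## ~4000 VMs / 1500, rounded up
  collect_concurrency = 3
  discover_concurrency = 3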

Configuring the historical_interval setting

When the vSphere plugin queries vCenter for historical statistics, it queries for statistics that exist at a specific interval. The default historical interval duration is 5 minutes, but if this interval has been changed in vCenter, you must override the default query interval in the vSphere plugin.

  • historical_interval: The interval of the most granular statistics configured in vSphere, represented in seconds.
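For example, if the most granular statistics interval in vCenter has been changed from the 5-minute default to 1 minute (an assumed setting), the plugin must be told about it:

  ## must match the "Interval Duration" configured on the vCenter server under
  ## Configure > General > Statistics > Statistic intervals
  historical_interval = "1m"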

Metrics

  • Cluster stats
    • Cluster services: CPU, memory, failover
    • CPU: total, usage
    • Memory: consumed, total, vmmemctl
    • VM operations: # changes, clone, create, deploy, destroy, power, reboot, reconfigure, register, reset, shutdown, standby, vMotion
  • Host stats
    • CPU: total, usage, cost, MHz
    • Datastore: IOPS, latency, read/write bytes, # reads/writes
    • Disk: commands, latency, kernel reads/writes, # reads/writes, queues
    • Memory: total, usage, active, latency, swap, shared, vmmemctl
    • Network: broadcast, bytes, dropped, errors, multicast, packets, usage
    • Power: energy, usage, capacity
    • Res CPU: active, max, running
    • Storage adapter: commands, latency, # reads/writes
    • Storage path: commands, latency, # reads/writes
    • System resources: CPU active, CPU max, CPU running, CPU usage, memory allocated, memory consumed, memory shared, swap
    • System: uptime
    • Flash module: active VMDKs
  • VM stats
    • CPU: demand, usage, readiness, cost, MHz
    • Datastore: latency, # reads/writes
    • Disk: commands, latency, # reads/writes, provisioned, usage
    • Memory: granted, usage, active, swap, vmmemctl, memorySizeMB (allocated), memoryReservation
    • Network: broadcast, bytes, dropped, multicast, packets, usage
    • Power: energy, usage
    • Res CPU: active, max, running
    • System: operating system uptime, uptime
    • Virtual disk: seeks, # reads/writes, latency, load
  • Resource pool stats
    • Memory: total, usage, active, latency, swap, shared, vmmemctl
    • CPU: capacity, usage, core count
    • Disk: throughput
    • Network: throughput
    • Power: energy, usage
  • Datastore stats
    • Disk: capacity, provisioned, used
  • Numeric sensor stats
    • CPU: temperature

Common vSphere performance metrics

The set of performance metrics in vSphere is open ended. Metrics may be added or removed in new releases, and the set of available metrics may vary depending on the hardware as well as on which plugins and add-on products are installed. Providing a definitive list of available metrics is therefore difficult. The metrics listed below are the most commonly available ones as of vSphere 6.5.

For a complete list of the metrics available in vSphere and the units they are measured in, refer to the VMware product documentation and the VMware Performance Manager documentation.

To list the exact set of metrics in your environment, use the govc tool. For example, to obtain the set of metrics for a VM, you can use the following command:

govc metric.ls vm/*

Virtual machine metrics

cpu.demandEntitlementRatio.latest
cpu.usage.average
cpu.ready.summation
cpu.run.summation
cpu.system.summation
cpu.swapwait.summation
cpu.costop.summation
cpu.demand.average
cpu.readiness.average
cpu.maxlimited.summation
cpu.wait.summation
cpu.usagemhz.average
cpu.latency.average
cpu.used.summation
cpu.overlap.summation
cpu.idle.summation
cpu.entitlement.latest
datastore.maxTotalLatency.latest
disk.usage.average
disk.read.average
disk.write.average
disk.maxTotalLatency.latest
mem.llSwapUsed.average
mem.swapin.average
mem.vmmemctltarget.average
mem.activewrite.average
mem.overhead.average
mem.vmmemctl.average
mem.zero.average
mem.swapoutRate.average
mem.active.average
mem.llSwapOutRate.average
mem.swapout.average
mem.llSwapInRate.average
mem.swapinRate.average
mem.granted.average
mem.latency.average
mem.overheadMax.average
mem.swapped.average
mem.compressionRate.average
mem.swaptarget.average
mem.shared.average
mem.zipSaved.latest
mem.overheadTouched.average
mem.zipped.latest
mem.consumed.average
mem.entitlement.average
mem.usage.average
mem.decompressionRate.average
mem.compressed.average
net.multicastRx.summation
net.transmitted.average
net.received.average
net.usage.average
net.broadcastTx.summation
net.broadcastRx.summation
net.packetsRx.summation
net.pnicBytesRx.average
net.multicastTx.summation
net.bytesTx.average
net.bytesRx.average
net.droppedRx.summation
net.pnicBytesTx.average
net.droppedTx.summation
net.packetsTx.summation
power.power.average
power.energy.summation
rescpu.runpk1.latest
rescpu.runpk15.latest
rescpu.maxLimited5.latest
rescpu.actpk5.latest
rescpu.samplePeriod.latest
rescpu.runav1.latest
rescpu.runav15.latest
rescpu.sampleCount.latest
rescpu.actpk1.latest
rescpu.runpk5.latest
rescpu.runav5.latest
rescpu.actav15.latest
rescpu.actav1.latest
rescpu.actpk15.latest
rescpu.actav5.latest
rescpu.maxLimited1.latest
rescpu.maxLimited15.latest
sys.osUptime.latest
sys.uptime.latest
sys.heartbeat.latest
virtualDisk.write.average
virtualDisk.read.average

Host system metrics

cpu.corecount.contention.average
cpu.usage.average
cpu.reservedCapacity.average
cpu.usagemhz.minimum
cpu.usagemhz.maximum
cpu.usage.minimum
cpu.usage.maximum
cpu.capacity.provisioned.average
cpu.capacity.usage.average
cpu.capacity.demand.average
cpu.capacity.contention.average
cpu.corecount.provisioned.average
cpu.corecount.usage.average
cpu.usagemhz.average
disk.throughput.contention.average
disk.throughput.usage.average
mem.decompressionRate.average
mem.granted.average
mem.active.average
mem.shared.average
mem.zero.average
mem.swapused.average
mem.vmmemctl.average
mem.compressed.average
mem.compressionRate.average
mem.reservedCapacity.average
mem.capacity.provisioned.average
mem.capacity.usable.average
mem.capacity.usage.average
mem.capacity.entitlement.average
mem.capacity.contention.average
mem.usage.minimum
mem.overhead.minimum
mem.consumed.minimum
mem.granted.minimum
mem.active.minimum
mem.shared.minimum
mem.zero.minimum
mem.swapused.minimum
mem.consumed.average
mem.usage.maximum
mem.overhead.maximum
mem.consumed.maximum
mem.granted.maximum
mem.overhead.average
mem.shared.maximum
mem.zero.maximum
mem.swapused.maximum
mem.vmmemctl.maximum
mem.usage.average
mem.active.maximum
mem.vmmemctl.minimum
net.throughput.contention.summation
net.throughput.usage.average
net.throughput.usable.average
net.throughput.provisioned.average
power.power.average
power.powerCap.average
power.energy.summation
vmop.numShutdownGuest.latest
vmop.numPoweroff.latest
vmop.numSuspend.latest
vmop.numReset.latest
vmop.numRebootGuest.latest
vmop.numStandbyGuest.latest
vmop.numPoweron.latest
vmop.numCreate.latest
vmop.numDestroy.latest
vmop.numRegister.latest
vmop.numUnregister.latest
vmop.numReconfigure.latest
vmop.numClone.latest
vmop.numDeploy.latest
vmop.numChangeHost.latest
vmop.numChangeDS.latest
vmop.numChangeHostDS.latest
vmop.numVMotion.latest
vmop.numSVMotion.latest
vmop.numXVMotion.latest

Resource pool metrics

cpu.usagemhz.average
cpu.cpuentitlement.latest
cpu.usagemhz.minimum
cpu.usagemhz.maximum
cpu.capacity.entitlement.average
cpu.capacity.usage.average
cpu.capacity.demand.average
cpu.capacity.contention.average
cpu.corecount.provisioned.average
cpu.corecount.contention.average
disk.throughput.usage.average
disk.throughput.contention.average
mem.capacity.contention.average
mem.overhead.average
mem.consumed.average
mem.granted.average
mem.active.average
mem.shared.average
mem.zero.average
mem.swapped.average
mem.vmmemctl.average
mem.capacity.provisioned.average
mem.capacity.entitlement.average
mem.capacity.usage.average
mem.mementitlement.latest
mem.compressed.average
mem.compressionRate.average
mem.decompressionRate.average
mem.overhead.minimum
mem.consumed.minimum
mem.granted.minimum
mem.active.minimum
mem.shared.minimum
mem.zero.minimum
mem.swapped.minimum
mem.vmmemctl.maximum
mem.overhead.maximum
mem.consumed.maximum
mem.granted.maximum
mem.active.maximum
mem.shared.maximum
mem.zero.maximum
mem.swapped.maximum
mem.vmmemctl.minimum
net.throughput.usage.average
net.throughput.contention.summation
power.power.average
power.energy.summation

Cluster metrics

cpu.corecount.contention.average
cpu.usage.average
cpu.reservedCapacity.average
cpu.usagemhz.minimum
cpu.usagemhz.maximum
cpu.usage.minimum
cpu.usage.maximum
cpu.capacity.provisioned.average
cpu.capacity.usage.average
cpu.capacity.demand.average
cpu.capacity.contention.average
cpu.corecount.provisioned.average
cpu.corecount.usage.average
cpu.usagemhz.average
disk.throughput.contention.average
disk.throughput.usage.average
mem.decompressionRate.average
mem.granted.average
mem.active.average
mem.shared.average
mem.zero.average
mem.swapused.average
mem.vmmemctl.average
mem.compressed.average
mem.compressionRate.average
mem.reservedCapacity.average
mem.capacity.provisioned.average
mem.capacity.usable.average
mem.capacity.usage.average
mem.capacity.entitlement.average
mem.capacity.contention.average
mem.usage.minimum
mem.overhead.minimum
mem.consumed.minimum
mem.granted.minimum
mem.active.minimum
mem.shared.minimum
mem.zero.minimum
mem.swapused.minimum
mem.consumed.average
mem.usage.maximum
mem.overhead.maximum
mem.consumed.maximum
mem.granted.maximum
mem.overhead.average
mem.shared.maximum
mem.zero.maximum
mem.swapused.maximum
mem.vmmemctl.maximum
mem.usage.average
mem.active.maximum
mem.vmmemctl.minimum
net.throughput.contention.summation
net.throughput.usage.average
net.throughput.usable.average
net.throughput.provisioned.average
power.power.average
power.powerCap.average
power.energy.summation
vmop.numShutdownGuest.latest
vmop.numPoweroff.latest
vmop.numSuspend.latest
vmop.numReset.latest
vmop.numRebootGuest.latest
vmop.numStandbyGuest.latest
vmop.numPoweron.latest
vmop.numCreate.latest
vmop.numDestroy.latest
vmop.numRegister.latest
vmop.numUnregister.latest
vmop.numReconfigure.latest
vmop.numClone.latest
vmop.numDeploy.latest
vmop.numChangeHost.latest
vmop.numChangeDS.latest
vmop.numChangeHostDS.latest
vmop.numVMotion.latest
vmop.numSVMotion.latest
vmop.numXVMotion.latest

Datastore metrics

datastore.numberReadAveraged.average
datastore.throughput.contention.average
datastore.throughput.usage.average
datastore.write.average
datastore.read.average
datastore.numberWriteAveraged.average
disk.used.latest
disk.provisioned.latest
disk.capacity.latest
disk.capacity.contention.average
disk.capacity.provisioned.average
disk.capacity.usage.average

Tags

  • all metrics
    • vcenter (vcenter url)
  • all host metrics
    • cluster (vcenter cluster)
  • all VM metrics
    • cluster (vcenter cluster)
    • esxhost (name of the ESXi host)
    • guest (guest operating system id)
    • resource pool (name of the resource pool)
  • cpu stats for hosts and VMs
    • cpu (CPU core - not all CPU fields will have this tag)
  • datastore stats for hosts and VMs
    • datastore (id of the datastore)
  • disk stats for hosts and VMs
    • disk (name of the disk)
  • disk.used.capacity (datastore)
    • disk (type of disk)
  • net stats for hosts and VMs
    • interface (name of the network interface)
  • storageAdapter stats for hosts
    • adapter (name of the storage adapter)
  • storagePath stats for hosts
    • path (id of the storage path)
  • sys.resource* stats for hosts
    • resource (resource type)
  • vflashModule stats for hosts
    • module (name of the flash module)
  • virtualDisk stats for VMs
    • disk (name of the virtual disk)

Adding the vSAN extension

vSAN resources are a special kind of resource that can be collected by the plugin. vSAN resources are configured slightly differently from hosts, VMs, and other resources.

vSAN prerequisites

  • vSphere 6.5 and later
  • A cluster with vSAN enabled
  • Turn on the Virtual SAN performance service: when a vSAN cluster is created, the performance service is disabled. To monitor performance metrics, you must turn on the vSAN performance service.

vSAN configuration

[[inputs.vsphere]]
  interval = "300s"
  vcenters = ["https://<vcenter-ip>/sdk", "https://<vcenter2-ip>/sdk"]
  username = "<user>"
  password = "<pwd>"

  # Exclude all other metrics
  vm_metric_exclude = ["*"]
  datastore_metric_exclude = ["*"]
  datacenter_metric_exclude = ["*"]
  host_metric_exclude = ["*"]
  cluster_metric_exclude = ["*"]

  # By default all supported entities will be included
  vsan_metric_include = [
    "summary.disk-usage",
    "summary.health",
    "summary.resync",
    "performance.cluster-domclient",
    "performance.cluster-domcompmgr",
    "performance.host-domclient",
    "performance.host-domcompmgr",
    "performance.cache-disk",
    "performance.disk-group",
    "performance.capacity-disk",
    "performance.disk-group",
    "performance.virtual-machine",
    "performance.vscsi",
    "performance.virtual-disk",
    "performance.vsan-host-net",
    "performance.vsan-vnic-net",
    "performance.vsan-pnic-net",
    "performance.vsan-iscsi-host",
    "performance.vsan-iscsi-target",
    "performance.vsan-iscsi-lun",
    "performance.lsom-world-cpu",
    "performance.nic-world-cpu",
    "performance.dom-world-cpu",
    "performance.cmmds-world-cpu",
    "performance.host-cpu",
    "performance.host-domowner",
    "performance.host-memory-slab",
    "performance.host-memory-heap",
    "performance.system-mem",
  ]
  # by default vsan_metric_skip_verify = false
  vsan_metric_skip_verify = true
  vsan_metric_exclude = [ ]
  # vsan_cluster_include = [ "/*/host/**" ] # Inventory path to clusters to collect (by default all are collected)

  collect_concurrency = 5
  discover_concurrency = 5

  ## Optional SSL Config
  # ssl_ca = "/path/to/cafile"
  # ssl_cert = "/path/to/certfile"
  # ssl_key = "/path/to/keyfile"
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false

Use vsan_metric_include = [...] to define the vSAN metrics you want to collect. For example:

  vsan_metric_include = ["summary.*", "performance.host-domclient", "performance.cache-disk", "performance.disk-group", "performance.capacity-disk"]

To include all supported vSAN metrics, use vsan_metric_include = [ "*" ]. To disable all vSAN metrics, use vsan_metric_exclude = [ "*" ].

vsan_metric_skip_verify defines whether to skip verifying vSAN metrics against the ones returned by the GetSupportedEntityTypes API. This option exists because some performance entities are not returned by that API, yet we want the flexibility to collect those stats if they are really needed. When set to false, anything not in the supported-entity list is filtered out. When set to true, exactly the metrics listed in vsan_metric_include are queried, and the exclude array is not used in that case. The default value is false.

vsan_cluster_include defines a list of inventory paths used to select a subset of vSAN clusters. vSAN metrics are collected only at the cluster level, so use inventory paths the same way as for vSphere clusters; see the sketch below.
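A minimal sketch, assuming only a cluster named Cluster1 in datacenter DC0 should be collected (the names are hypothetical):

  ## collect vSAN metrics only for Cluster1 in DC0
  vsan_cluster_include = [ "/DC0/host/Cluster1" ]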

Many vCenter environments use self-signed certificates. Update the bottom portion of the configuration above and provide proper values for all applicable SSL settings in your vSphere environment. In some environments, setting insecure_skip_verify = true is necessary when SSL certificates are not available.

To ensure consistent collection in larger vSphere environments, you must increase the plugin's concurrency. Use the collect_concurrency setting to control this. Set collect_concurrency to the number of virtual machines divided by 1500, rounded up to the nearest integer. For example, use 1 for 1200 VMs and 2 for 2300 VMs.

Measurements & Fields

Note: vSAN performance measurements and fields may vary depending on the vSAN version.

  • vSAN summary

    • overall_health
    • total_capacity_bytes, free_capacity_bytes
    • total_objects_to_sync, total_recovery_eta
  • vSAN performance

    • cluster-domclient
      • iops_read, throughput_read, latency_avg_read, iops_write, throughput_write, latency_avg_write, congestion, oio
    • cluster-domcompmgr
      • iops_read, throughput_read, latency_avg_read, iops_write, throughput_write, latency_avg_write, iops_rec_write, throughput_rec_write, latency_avg_rec_write, congestion, oio, iops_resync_read, tput_resync_read, lat_avg_resyncread
    • host-domclient
      • iops_read, throughput_read, latency_avg_read, read_count, iops_write, throughput_write, latency_avg_write, write_count, congestion, oio, client_cache_hits, client_cache_hit_rate
    • host-domcompmgr
      • iops_read, throughput_read, latency_avg_read, read_count, iops_write, throughput_write, latency_avg_write, write_count, iops_rec_write, throughput_rec_write, latency_avg_rec_write, rec_write_count, congestion, oio, iops_resync_read, tput_resync_read, lat_avg_resync_read
    • cache-disk
      • iops_dev_read, throughput_dev_read, latency_dev_read, io_count_dev_read, iops_dev_write, throughput_dev_write, latency_dev_write, io_count_dev_write, latency_dev_d_avg, latency_dev_g_avg
    • capacity-disk
      • iops_dev_read, throughput_dev_read, latency_dev_read, io_count_dev_read, iops_dev_write, throughput_dev_write, latency_dev_write, io_count_dev_write, latency_dev_d_avg, latency_dev_g_avg, iops_read, latency_read, io_count_read, iops_write, latency_write, io_count_write
    • disk-group
      • iops_sched, latency_sched, outstanding_bytes_sched, iops_sched_queue_rec, throughput_sched_queue_rec,latency_sched_queue_rec, iops_sched_queue_vm, throughput_sched_queue_vm,latency_sched_queue_vm, iops_sched_queue_meta, throughput_sched_queue_meta,latency_sched_queue_meta, iops_delay_pct_sched, latency_delay_sched, rc_hit_rate, wb_free_pct, war_evictions, quota_evictions, iops_rc_read, latency_rc_read, io_count_rc_read, iops_wb_read, latency_wb_read, io_count_wb_read, iops_rc_write, latency_rc_write, io_count_rc_write, iops_wb_write, latency_wb_write, io_count_wb_write, ssd_bytes_drained, zero_bytes_drained, mem_congestion, slab_congestion, ssd_congestion, iops_congestion, log_congestion, comp_congestion, iops_direct_sched, iops_read, throughput_read, latency_avg_read, read_count, iops_write, throughput_write, latency_avg_write, write_count, oio_write, oio_rec_write, oio_write_size, oio_rec_write_size, rc_size, wb_size, capacity, capacity_used, capacity_reserved, throughput_sched, iops_resync_read_policy, iops_resync_read_decom, iops_resync_read_rebalance, iops_resync_read_fix_comp, iops_resync_write_policy, iops_resync_write_decom, iops_resync_write_rebalance, iops_resync_write_fix_comp, tput_resync_read_policy, tput_resync_read_decom, tput_resync_read_rebalance, tput_resync_read_fix_comp, tput_resync_write_policy, tput_resync_write_decom, tput_resync_write_rebalance, tput_resync_write_fix_comp, lat_resync_read_policy, lat_resync_read_decom, lat_resync_read_rebalance, lat_resync_read_fix_comp, lat_resync_write_policy, lat_resync_write_decom, lat_resync_write_rebalance, lat_resync_write_fix_comp
    • virtual-machine
      • iops_read, throughput_read, latency_read_avg, latency_read_stddev, read_count, iops_write, throughput_write, latency_write_avg, latency_write_stddev, write_count
    • vscsi
      • iops_read, throughput_read, latency_read, read_count, iops_write, throughput_write, latency_write, write_count
    • virtual-disk
      • iops_limit, niops, niops_delayed
    • vsan-host-net
      • rx_throughput, rx_packets, rx_packets_loss_rate, tx_throughput, tx_packets, tx_packets_loss_rate
    • vsan-vnic-net
      • rx_throughput, rx_packets, rx_packets_loss_rate, tx_throughput, tx_packets, tx_packets_loss_rate
    • vsan-pnic-net
      • rx_throughput, rx_packets, rx_packets_loss_rate, tx_throughput, tx_packets, tx_packets_loss_rate
    • vsan-iscsi-host
      • iops_read, iops_write, iops_total, bandwidth_read, bandwidth_write, bandwidth_total, latency_read, latency_write, latency_total, queue_depth
    • vsan-iscsi-target
      • iops_read, iops_write, iops_total, bandwidth_read, bandwidth_write, bandwidth_total, latency_read, latency_write, latency_total, queue_depth
    • vsan-iscsi-lun
      • iops_read, iops_write, iops_total, bandwidth_read, bandwidth_write, bandwidth_total, latency_read, latency_write, latency_total, queue_depth

vSAN tags

  • all vSAN metrics
    • vcenter
    • dcname
    • clustername
    • moid (the managed object id of the cluster)
  • host-domclient, host-domcompmgr
    • hostname
  • disk-group, cache-disk, capacity-disk
    • hostname
    • deviceName
    • ssdUuid (if the disk is an SSD)
  • vsan-host-net
    • hostname
  • vsan-pnic-net
    • pnic
  • vsan-vnic-net
    • vnic
    • stackName

Realtime vs. historical metrics in vSAN

vSAN also keeps two different kinds of metrics: realtime and historical metrics.

  • Realtime metrics are the metrics prefixed with "summary". They are available in real time.
  • Historical metrics are the metrics prefixed with "performance". They are queried from the vSAN performance API, which is available at a 5-minute rollup level.

For performance reasons, it is best to specify two plugin instances: one for the realtime metrics with a short collection interval, and a second one for the historical metrics with a longer collection interval. For example:

## Realtime instance
[[inputs.vsphere]]
  interval = "30s"
  vcenters = [ "https://someaddress/sdk" ]
  username = "someuser@vsphere.local"
  password = "secret"

  insecure_skip_verify = true

  # Exclude all other metrics
  vm_metric_exclude = ["*"]
  datastore_metric_exclude = ["*"]
  datacenter_metric_exclude = ["*"]
  host_metric_exclude = ["*"]
  cluster_metric_exclude = ["*"]

  vsan_metric_include = [ "summary.*" ]
  vsan_metric_exclude = [ ]
  vsan_metric_skip_verify = false

  collect_concurrency = 5
  discover_concurrency = 5

# Historical instance
[[inputs.vsphere]]

  interval = "300s"
  vcenters = [ "https://someaddress/sdk" ]
  username = "someuser@vsphere.local"
  password = "secret"

  insecure_skip_verify = true

  # Exclude all other metrics
  vm_metric_exclude = ["*"]
  datastore_metric_exclude = ["*"]
  datacenter_metric_exclude = ["*"]
  host_metric_exclude = ["*"]
  cluster_metric_exclude = ["*"]

  vsan_metric_include = [ "performance.*" ]
  vsan_metric_exclude = [ ]
  vsan_metric_skip_verify = false

  collect_concurrency = 5
  discover_concurrency = 5

Example Output

vsphere_vm_cpu,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-35,os=Mac,source=DC0_H0_VM0,vcenter=localhost:8989,vmname=DC0_H0_VM0 run_summation=2608i,ready_summation=129i,usage_average=5.01,used_summation=2134i,demand_average=326i 1535660299000000000
vsphere_vm_net,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-35,os=Mac,source=DC0_H0_VM0,vcenter=localhost:8989,vmname=DC0_H0_VM0 bytesRx_average=321i,bytesTx_average=335i 1535660299000000000
vsphere_vm_virtualDisk,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-35,os=Mac,source=DC0_H0_VM0,vcenter=localhost:8989,vmname=DC0_H0_VM0 write_average=144i,read_average=4i 1535660299000000000
vsphere_vm_net,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-38,os=Mac,source=DC0_H0_VM1,vcenter=localhost:8989,vmname=DC0_H0_VM1 bytesRx_average=242i,bytesTx_average=308i 1535660299000000000
vsphere_vm_virtualDisk,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-38,os=Mac,source=DC0_H0_VM1,vcenter=localhost:8989,vmname=DC0_H0_VM1 write_average=232i,read_average=4i 1535660299000000000
vsphere_vm_cpu,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-38,os=Mac,source=DC0_H0_VM1,vcenter=localhost:8989,vmname=DC0_H0_VM1 usage_average=5.49,used_summation=1804i,demand_average=308i,run_summation=2001i,ready_summation=120i 1535660299000000000
vsphere_vm_cpu,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-41,os=Mac,source=DC0_C0_RP0_VM0,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM0 usage_average=4.19,used_summation=2108i,demand_average=285i,run_summation=1793i,ready_summation=93i 1535660299000000000
vsphere_vm_net,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-41,os=Mac,source=DC0_C0_RP0_VM0,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM0 bytesRx_average=272i,bytesTx_average=419i 1535660299000000000
vsphere_vm_virtualDisk,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-41,os=Mac,source=DC0_C0_RP0_VM0,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM0 write_average=229i,read_average=4i 1535660299000000000
vsphere_vm_cpu,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-44,os=Mac,source=DC0_C0_RP0_VM1,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM1 run_summation=2277i,ready_summation=118i,usage_average=4.67,used_summation=2546i,demand_average=289i 1535660299000000000
vsphere_vm_net,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-44,os=Mac,source=DC0_C0_RP0_VM1,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM1 bytesRx_average=243i,bytesTx_average=296i 1535660299000000000
vsphere_vm_virtualDisk,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-44,os=Mac,source=DC0_C0_RP0_VM1,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM1 write_average=158i,read_average=4i 1535660299000000000
vsphere_host_net,esxhostname=DC0_H0,host=host.example.com,interface=vmnic0,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 usage_average=1042i,bytesTx_average=753i,bytesRx_average=660i 1535660299000000000
vsphere_host_cpu,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 utilization_average=10.46,usage_average=22.4,readiness_average=0.4,costop_summation=2i,coreUtilization_average=19.61,wait_summation=5148518i,idle_summation=58581i,latency_average=0.6,ready_summation=13370i,used_summation=19219i 1535660299000000000
vsphere_host_cpu,cpu=0,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 coreUtilization_average=25.6,utilization_average=11.58,used_summation=24306i,usage_average=24.26,idle_summation=86688i 1535660299000000000
vsphere_host_cpu,cpu=1,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 coreUtilization_average=12.29,utilization_average=8.32,used_summation=31312i,usage_average=22.47,idle_summation=94934i 1535660299000000000
vsphere_host_disk,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 read_average=331i,write_average=2800i 1535660299000000000
vsphere_host_disk,disk=/var/folders/rf/txwdm4pj409f70wnkdlp7sz80000gq/T/govcsim-DC0-LocalDS_0-367088371@folder-5,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 write_average=2701i,read_average=258i 1535660299000000000
vsphere_host_mem,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 usage_average=93.27 1535660299000000000
vsphere_host_net,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 bytesTx_average=650i,usage_average=1414i,bytesRx_average=569i 1535660299000000000
vsphere_host_cpu,clustername=DC0_C0,cpu=1,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 utilization_average=12.6,used_summation=25775i,usage_average=24.44,idle_summation=68886i,coreUtilization_average=17.59 1535660299000000000
vsphere_host_disk,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 read_average=340i,write_average=2340i 1535660299000000000
vsphere_host_disk,clustername=DC0_C0,disk=/var/folders/rf/txwdm4pj409f70wnkdlp7sz80000gq/T/govcsim-DC0-LocalDS_0-367088371@folder-5,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 write_average=2277i,read_average=282i 1535660299000000000
vsphere_host_mem,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 usage_average=104.78 1535660299000000000
vsphere_host_net,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 bytesTx_average=463i,usage_average=1131i,bytesRx_average=719i 1535660299000000000
vsphere_host_net,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,interface=vmnic0,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 usage_average=1668i,bytesTx_average=838i,bytesRx_average=921i 1535660299000000000
vsphere_host_cpu,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 used_summation=28952i,utilization_average=11.36,idle_summation=93261i,latency_average=0.46,ready_summation=12837i,usage_average=21.56,readiness_average=0.39,costop_summation=2i,coreUtilization_average=27.19,wait_summation=3820829i 1535660299000000000
vsphere_host_cpu,clustername=DC0_C0,cpu=0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 coreUtilization_average=24.12,utilization_average=13.83,used_summation=22462i,usage_average=24.69,idle_summation=96993i 1535660299000000000
internal_vsphere,host=host.example.com,os=Mac,vcenter=localhost:8989 connect_ns=4727607i,discover_ns=65389011i,discovered_objects=8i 1535660309000000000
internal_vsphere,host=host.example.com,os=Mac,resourcetype=datastore,vcenter=localhost:8989 gather_duration_ns=296223i,gather_count=0i 1535660309000000000
internal_vsphere,host=host.example.com,os=Mac,resourcetype=vm,vcenter=192.168.1.151 gather_duration_ns=136050i,gather_count=0i 1535660309000000000
internal_vsphere,host=host.example.com,os=Mac,resourcetype=host,vcenter=localhost:8989 gather_count=62i,gather_duration_ns=8788033i 1535660309000000000
internal_vsphere,host=host.example.com,os=Mac,resourcetype=host,vcenter=192.168.1.151 gather_count=0i,gather_duration_ns=162002i 1535660309000000000
internal_gather,host=host.example.com,input=vsphere,os=Mac gather_time_ns=17483653i,metrics_gathered=28i 1535660309000000000
internal_vsphere,host=host.example.com,os=Mac,vcenter=192.168.1.151 connect_ns=0i 1535660309000000000
internal_vsphere,host=host.example.com,os=Mac,resourcetype=vm,vcenter=localhost:8989 gather_duration_ns=7291897i,gather_count=36i 1535660309000000000
internal_vsphere,host=host.example.com,os=Mac,resourcetype=datastore,vcenter=192.168.1.151 gather_duration_ns=958474i,gather_count=0i 1535660309000000000
vsphere_vm_cpu,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-38,os=Mac,source=DC0_H0_VM1,vcenter=localhost:8989,vmname=DC0_H0_VM1 usage_average=8.82,used_summation=3192i,demand_average=283i,run_summation=2419i,ready_summation=115i 1535660319000000000
vsphere_vm_net,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-38,os=Mac,source=DC0_H0_VM1,vcenter=localhost:8989,vmname=DC0_H0_VM1 bytesRx_average=277i,bytesTx_average=343i 1535660319000000000
vsphere_vm_virtualDisk,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-38,os=Mac,source=DC0_H0_VM1,vcenter=localhost:8989,vmname=DC0_H0_VM1 read_average=1i,write_average=741i 1535660319000000000
vsphere_vm_net,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-41,os=Mac,source=DC0_C0_RP0_VM0,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM0 bytesRx_average=386i,bytesTx_average=369i 1535660319000000000
vsphere_vm_virtualDisk,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-41,os=Mac,source=DC0_C0_RP0_VM0,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM0 write_average=814i,read_average=1i 1535660319000000000
vsphere_vm_cpu,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-41,os=Mac,source=DC0_C0_RP0_VM0,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM0 run_summation=1778i,ready_summation=111i,usage_average=7.54,used_summation=2339i,demand_average=297i 1535660319000000000
vsphere_vm_cpu,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-44,os=Mac,source=DC0_C0_RP0_VM1,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM1 usage_average=6.98,used_summation=2125i,demand_average=211i,run_summation=2990i,ready_summation=141i 1535660319000000000
vsphere_vm_net,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-44,os=Mac,source=DC0_C0_RP0_VM1,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM1 bytesRx_average=357i,bytesTx_average=268i 1535660319000000000
vsphere_vm_virtualDisk,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-44,os=Mac,source=DC0_C0_RP0_VM1,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM1 write_average=528i,read_average=1i 1535660319000000000
vsphere_vm_cpu,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-35,os=Mac,source=DC0_H0_VM0,vcenter=localhost:8989,vmname=DC0_H0_VM0 used_summation=2374i,demand_average=195i,run_summation=3454i,ready_summation=110i,usage_average=7.34 1535660319000000000
vsphere_vm_net,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-35,os=Mac,source=DC0_H0_VM0,vcenter=localhost:8989,vmname=DC0_H0_VM0 bytesRx_average=308i,bytesTx_average=246i 1535660319000000000
vsphere_vm_virtualDisk,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-35,os=Mac,source=DC0_H0_VM0,vcenter=localhost:8989,vmname=DC0_H0_VM0 write_average=1178i,read_average=1i 1535660319000000000
vsphere_host_net,esxhostname=DC0_H0,host=host.example.com,interface=vmnic0,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 bytesRx_average=773i,usage_average=1521i,bytesTx_average=890i 1535660319000000000
vsphere_host_cpu,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 wait_summation=3421258i,idle_summation=67994i,latency_average=0.36,usage_average=29.86,readiness_average=0.37,used_summation=25244i,costop_summation=2i,coreUtilization_average=21.94,utilization_average=17.19,ready_summation=15897i 1535660319000000000
vsphere_host_cpu,cpu=0,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 utilization_average=11.32,used_summation=19333i,usage_average=14.29,idle_summation=92708i,coreUtilization_average=27.68 1535660319000000000
vsphere_host_cpu,cpu=1,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 used_summation=28596i,usage_average=25.32,idle_summation=79553i,coreUtilization_average=28.01,utilization_average=11.33 1535660319000000000
vsphere_host_disk,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 read_average=86i,write_average=1659i 1535660319000000000
vsphere_host_disk,disk=/var/folders/rf/txwdm4pj409f70wnkdlp7sz80000gq/T/govcsim-DC0-LocalDS_0-367088371@folder-5,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 write_average=1997i,read_average=58i 1535660319000000000
vsphere_host_mem,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 usage_average=68.45 1535660319000000000
vsphere_host_net,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 bytesTx_average=679i,usage_average=2286i,bytesRx_average=719i 1535660319000000000
vsphere_host_cpu,clustername=DC0_C0,cpu=1,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 utilization_average=10.52,used_summation=21693i,usage_average=23.09,idle_summation=84590i,coreUtilization_average=29.92 1535660319000000000
vsphere_host_disk,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 read_average=113i,write_average=1236i 1535660319000000000
vsphere_host_disk,clustername=DC0_C0,disk=/var/folders/rf/txwdm4pj409f70wnkdlp7sz80000gq/T/govcsim-DC0-LocalDS_0-367088371@folder-5,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 write_average=1708i,read_average=110i 1535660319000000000
vsphere_host_mem,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 usage_average=111.46 1535660319000000000
vsphere_host_net,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 bytesTx_average=998i,usage_average=2000i,bytesRx_average=881i 1535660319000000000
vsphere_host_net,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,interface=vmnic0,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 usage_average=1683i,bytesTx_average=675i,bytesRx_average=1078i 1535660319000000000
vsphere_host_cpu,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 used_summation=28531i,wait_summation=3139129i,utilization_average=9.99,idle_summation=98579i,latency_average=0.51,costop_summation=2i,coreUtilization_average=14.35,ready_summation=16121i,usage_average=34.19,readiness_average=0.4 1535660319000000000
vsphere_host_cpu,clustername=DC0_C0,cpu=0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 utilization_average=12.2,used_summation=22750i,usage_average=18.84,idle_summation=99539i,coreUtilization_average=23.05 1535660319000000000
internal_vsphere,host=host.example.com,os=Mac,resourcetype=host,vcenter=localhost:8989 gather_duration_ns=7076543i,gather_count=62i 1535660339000000000
internal_vsphere,host=host.example.com,os=Mac,resourcetype=host,vcenter=192.168.1.151 gather_duration_ns=4051303i,gather_count=0i 1535660339000000000
internal_gather,host=host.example.com,input=vsphere,os=Mac metrics_gathered=56i,gather_time_ns=13555029i 1535660339000000000
internal_vsphere,host=host.example.com,os=Mac,vcenter=192.168.1.151 connect_ns=0i 1535660339000000000
internal_vsphere,host=host.example.com,os=Mac,resourcetype=vm,vcenter=localhost:8989 gather_duration_ns=6335467i,gather_count=36i 1535660339000000000
internal_vsphere,host=host.example.com,os=Mac,resourcetype=datastore,vcenter=192.168.1.151 gather_duration_ns=958474i,gather_count=0i 1535660339000000000
internal_vsphere,host=host.example.com,os=Mac,vcenter=localhost:8989 discover_ns=65389011i,discovered_objects=8i,connect_ns=4727607i 1535660339000000000
internal_vsphere,host=host.example.com,os=Mac,resourcetype=datastore,vcenter=localhost:8989 gather_duration_ns=296223i,gather_count=0i 1535660339000000000
internal_vsphere,host=host.example.com,os=Mac,resourcetype=vm,vcenter=192.168.1.151 gather_count=0i,gather_duration_ns=1540920i 1535660339000000000
vsphere_vm_virtualDisk,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-35,os=Mac,source=DC0_H0_VM0,vcenter=localhost:8989,vmname=DC0_H0_VM0 write_average=302i,read_average=11i 1535660339000000000
vsphere_vm_cpu,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-35,os=Mac,source=DC0_H0_VM0,vcenter=localhost:8989,vmname=DC0_H0_VM0 usage_average=5.58,used_summation=2941i,demand_average=298i,run_summation=3255i,ready_summation=96i 1535660339000000000
vsphere_vm_net,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-35,os=Mac,source=DC0_H0_VM0,vcenter=localhost:8989,vmname=DC0_H0_VM0 bytesRx_average=155i,bytesTx_average=241i 1535660339000000000
vsphere_vm_cpu,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-38,os=Mac,source=DC0_H0_VM1,vcenter=localhost:8989,vmname=DC0_H0_VM1 usage_average=10.3,used_summation=3053i,demand_average=346i,run_summation=3289i,ready_summation=122i 1535660339000000000
vsphere_vm_net,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-38,os=Mac,source=DC0_H0_VM1,vcenter=localhost:8989,vmname=DC0_H0_VM1 bytesRx_average=215i,bytesTx_average=275i 1535660339000000000
vsphere_vm_virtualDisk,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-38,os=Mac,source=DC0_H0_VM1,vcenter=localhost:8989,vmname=DC0_H0_VM1 write_average=252i,read_average=14i 1535660339000000000
vsphere_vm_cpu,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-41,os=Mac,source=DC0_C0_RP0_VM0,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM0 usage_average=8,used_summation=2183i,demand_average=354i,run_summation=3542i,ready_summation=128i 1535660339000000000
vsphere_vm_net,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-41,os=Mac,source=DC0_C0_RP0_VM0,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM0 bytesRx_average=178i,bytesTx_average=200i 1535660339000000000
vsphere_vm_virtualDisk,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-41,os=Mac,source=DC0_C0_RP0_VM0,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM0 write_average=283i,read_average=12i 1535660339000000000
vsphere_vm_cpu,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-44,os=Mac,source=DC0_C0_RP0_VM1,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM1 demand_average=328i,run_summation=3481i,ready_summation=122i,usage_average=7.95,used_summation=2167i 1535660339000000000
vsphere_vm_net,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-44,os=Mac,source=DC0_C0_RP0_VM1,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM1 bytesTx_average=282i,bytesRx_average=196i 1535660339000000000
vsphere_vm_virtualDisk,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-44,os=Mac,source=DC0_C0_RP0_VM1,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM1 write_average=321i,read_average=13i 1535660339000000000
vsphere_host_disk,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 read_average=39i,write_average=2635i 1535660339000000000
vsphere_host_disk,disk=/var/folders/rf/txwdm4pj409f70wnkdlp7sz80000gq/T/govcsim-DC0-LocalDS_0-367088371@folder-5,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 write_average=2635i,read_average=30i 1535660339000000000
vsphere_host_mem,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 usage_average=98.5 1535660339000000000
vsphere_host_net,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 usage_average=1887i,bytesRx_average=662i,bytesTx_average=251i 1535660339000000000
vsphere_host_net,esxhostname=DC0_H0,host=host.example.com,interface=vmnic0,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 usage_average=1481i,bytesTx_average=899i,bytesRx_average=992i 1535660339000000000
vsphere_host_cpu,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 used_summation=50405i,costop_summation=2i,utilization_average=17.32,latency_average=0.61,ready_summation=14843i,usage_average=27.94,coreUtilization_average=32.12,wait_summation=3058787i,idle_summation=56600i,readiness_average=0.36 1535660339000000000
vsphere_host_cpu,cpu=0,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 coreUtilization_average=37.61,utilization_average=17.05,used_summation=38013i,usage_average=32.66,idle_summation=89575i 1535660339000000000
vsphere_host_cpu,cpu=1,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 coreUtilization_average=25.92,utilization_average=18.72,used_summation=39790i,usage_average=40.42,idle_summation=69457i 1535660339000000000
vsphere_host_net,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,interface=vmnic0,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 usage_average=1246i,bytesTx_average=673i,bytesRx_average=781i 1535660339000000000
vsphere_host_cpu,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 coreUtilization_average=33.8,idle_summation=77121i,ready_summation=15857i,readiness_average=0.39,used_summation=29554i,costop_summation=2i,wait_summation=4338417i,utilization_average=17.87,latency_average=0.44,usage_average=28.78 1535660339000000000
vsphere_host_cpu,clustername=DC0_C0,cpu=0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 idle_summation=86610i,coreUtilization_average=34.36,utilization_average=19.03,used_summation=28766i,usage_average=23.72 1535660339000000000
vsphere_host_cpu,clustername=DC0_C0,cpu=1,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 coreUtilization_average=33.15,utilization_average=16.8,used_summation=44282i,usage_average=30.08,idle_summation=93490i 1535660339000000000
vsphere_host_disk,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 read_average=56i,write_average=1672i 1535660339000000000
vsphere_host_disk,clustername=DC0_C0,disk=/var/folders/rf/txwdm4pj409f70wnkdlp7sz80000gq/T/govcsim-DC0-LocalDS_0-367088371@folder-5,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 write_average=2110i,read_average=48i 1535660339000000000
vsphere_host_mem,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 usage_average=116.21 1535660339000000000
vsphere_host_net,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 bytesRx_average=726i,bytesTx_average=643i,usage_average=1504i 1535660339000000000

Example vSAN Output

vsphere_vsan_performance_hostdomclient,clustername=Example-VSAN,dcname=Example-DC,host=host.example.com,hostname=DC0_C0_H0,moid=domain-c8,source=Example-VSAN,vcenter=localhost:8898 iops_read=7,write_congestion=0,unmap_congestion=0,read_count=2199,iops=8,latency_max_write=8964,latency_avg_unmap=0,latency_avg_write=1883,write_count=364,num_oio=12623,throughput=564127,client_cache_hits=0,latency_max_read=17821,latency_max_unmap=0,read_congestion=0,latency_avg=1154,congestion=0,throughput_read=554721,latency_avg_read=1033,throughput_write=9406,client_cache_hit_rate=0,iops_unmap=0,throughput_unmap=0,latency_stddev=1315,io_count=2563,oio=4,iops_write=1,unmap_count=0 1578955200000000000
vsphere_vsan_performance_clusterdomcompmgr,clustername=Example-VSAN,dcname=Example-DC,host=host.example.com,moid=domain-c7,source=Example-VSAN,uuid=XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXX,vcenter=localhost:8898 latency_avg_rec_write=0,latency_avg_write=9886,congestion=0,iops_resync_read=0,lat_avg_resync_read=0,iops_read=289,latency_avg_read=1184,throughput_write=50137368,iops_rec_write=0,throughput_rec_write=0,tput_resync_read=0,throughput_read=9043654,iops_write=1272,oio=97 1578954900000000000
vsphere_vsan_performance_clusterdomclient,clustername=Example-VSAN,dcname=Example-DC,host=host.example.com,moid=domain-c7,source=Example-VSAN,uuid=XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXX,vcenter=localhost:8898 latency_avg_write=1011,congestion=0,oio=26,iops_read=6,throughput_read=489093,latency_avg_read=1085,iops_write=43,throughput_write=435142 1578955200000000000
vsphere_vsan_summary,clustername=Example-VSAN,dcname=Example-DC,host=host.example.com,moid=domain-c7,source=Example-VSAN,vcenter=localhost:8898 total_bytes_to_sync=0i,total_objects_to_sync=0i,total_recovery_eta=0i 1578955489000000000
vsphere_vsan_summary,clustername=Example-VSAN,dcname=Example-DC,host=host.example.com,moid=domain-c7,source=Example-VSAN,vcenter=localhost:8898 overall_health=1i 1578955489000000000
vsphere_vsan_summary,clustername=Example-VSAN,dcname=Example-DC,host=host.example.com,moid=domain-c7,source=Example-VSAN,vcenter=localhost:8898 free_capacity_byte=11022535578757i,total_capacity_byte=14102625779712i 1578955488000000000
