Hi, I am new to Python and I am exploring pyVmomi. I want to fetch VM info. I have one data center, i.e. "DataCenter1".
In that data center there are two folders, LinuxServer and WindowsServer, which contain VMs. I want to fetch each VM name along with its respective folder name:
DataCenter1
|
|----LinuxServer
| |---RHEL-VM
| |---Ubuntu-VM
|
|----WindowsServer
| |---win2k12r2-VM
| |---win2k8r2-VM
My code:
from pyvim.connect import SmartConnect, Disconnect
import ssl

context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_NONE
connect = SmartConnect(host="172.0.0.0", user="root", pwd="****", port=443, sslContext=context)
datacenter = connect.content.rootFolder.childEntity[0]
print(datacenter)
vms = datacenter.vmFolder.childEntity
for vm in vms:
    print(vm.name)
    # Here I want to fetch the VM name and its respective folder name
Disconnect(connect)
Here I am able to fetch all VM names, but I also want the folder name of each VM.
Is there a method for this?
Can you please guide me?
This will give you the parent name of the VM, i.e. your folder name, if it exists.
from pyvim.connect import SmartConnect, Disconnect
import ssl

context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_NONE
connect = SmartConnect(host="172.0.0.0", user="root", pwd="****", port=443, sslContext=context)
datacenter = connect.content.rootFolder.childEntity[0]
print(datacenter)
vms = datacenter.vmFolder.childEntity
for vm in vms:
    # vm.parent is the folder that directly contains the VM
    print(vm.parent.name)
Disconnect(connect)
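Note that datacenter.vmFolder.childEntity only returns the top level, so vm.parent.name works when the VMs sit directly in those folders. If VMs can be nested deeper, here is a minimal sketch of a recursive walk (assuming the same connect object as above; the walk_folder helper is mine, not part of pyVmomi):

from pyVmomi import vim

def walk_folder(folder, path=""):
    # recursively print "folder path -> VM name" for everything under this folder
    for child in folder.childEntity:
        if isinstance(child, vim.Folder):
            walk_folder(child, path + "/" + child.name)
        elif isinstance(child, vim.VirtualMachine):
            print(path or "/", "->", child.name)

walk_folder(datacenter.vmFolder)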
I use Python 3.6; the full example is below. It logs in to vSphere and prints every virtual machine name.
#!/usr/bin/env python3.6
# encoding: utf-8

from pyVim import connect
import ssl


def login():
    ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    ssl_context.verify_mode = ssl.CERT_NONE
    si = connect.SmartConnect(host='192.168.0.1', user='root', pwd='password',
                              sslContext=ssl_context)
    print(si)
    print('\nHello World!\n')
    print('If you got here, you authenticated into vCenter.')
    data_center = si.content.rootFolder.childEntity[0]
    vms = data_center.vmFolder.childEntity
    for vm in vms:
        print(vm.name)


if __name__ == '__main__':
    login()
result:
'vim.ServiceInstance:ServiceInstance'
Hello World!
If you got here, you authenticated into vCenter.
sclautoesxd12v03
sclautoesxd12v04
sclautoesxd12v07
sclautoesxd12v09
sclautoesxd12v11
sclautoesxd12v12
sclautoesxd12v13
sclautoesxd12v16
sclautoesxd12v17
sclautoesxd12v01
sclautoesxd12v02
sclautoesxd12v05
sclautoesxd12v06
sclautoesxd12v08
sclautoesxd12v10
sclautoesxd12v14
sclautoesxd12v15
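To also show the folder, as the question asks, you could tweak the loop to print the parent name alongside each VM (same vm.parent attribute as in the answer above):

for vm in vms:
    # vm.parent is the folder object that directly contains the VM
    print(vm.parent.name, vm.name)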
I am getting the following error when trying to use pyVmomi to get a list of VMs from the vCenter Server Appliance.
pyVmomi.VmomiSupport.vim.fault.NoPermission: (vim.fault.NoPermission) {
dynamicType = <unset>,
dynamicProperty = (vmodl.DynamicProperty) [],
msg = 'Permission to perform this operation was denied.',
faultCause = <unset>,
faultMessage = (vmodl.LocalizableMessage) [],
object = 'vim.Folder:group-d1',
privilegeId = 'System.View',
missingPrivileges = (vim.fault.NoPermission.EntityPrivileges) [
(vim.fault.NoPermission.EntityPrivileges) {
dynamicType = <unset>,
dynamicProperty = (vmodl.DynamicProperty) [],
entity = 'vim.Folder:group-d1',
privilegeIds = (str) [
'System.View'
]
}
]
}
This is my Python code:
import atexit
import ssl
from pyVim import connect
from pyVmomi import vim
import pdb


def vconnect(hostIP, port=None):
    if True:
        context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE  # disable our certificate checking for lab
    else:
        context = ssl.create_default_context()
        context.options |= ssl.OP_NO_TLSv1_3
        # cipher = 'DHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA:ECDHE-ECDSA-AES128-GCM-SHA256'
        # context.set_ciphers(cipher)
    pdb.set_trace()
    if port:
        service_instance = connect.SmartConnect(host=str(hostIP),  # build python connection to vSphere
                                                user="root",
                                                pwd="HagsLoff#1324",
                                                port=port,
                                                sslContext=context)
    else:
        service_instance = connect.SmartConnect(host=str(hostIP),  # build python connection to vSphere
                                                user="root",
                                                pwd="HagsLoff#1324",
                                                sslContext=context)
    atexit.register(connect.Disconnect, service_instance)  # build disconnect logic
    content = service_instance.RetrieveContent()
    container = content.rootFolder  # starting point to look into
    viewType = [vim.VirtualMachine]  # object types to look for
    recursive = True  # whether we should look into it recursively
    containerView = content.viewManager.CreateContainerView(container, viewType, recursive)  # create container view
    children = containerView.view
    for child in children:  # iterate all VMs in the environment and print their names
        summary = child.summary
        print(summary.config.name)


# connecting to ESX host
vconnect("192.168.160.160")
# connecting to vcsa VM
vconnect("192.168.160.170", 443)
I am using a nested ESXi host that runs on my Workstation 16. I have deployed the VCSA on this ESXi host via the Windows CLI installer. Querying the ESXi host works fine, whereas querying the vCenter Server Appliance (VCSA) gives me the above error.
I looked at a discussion that talks about setting 'global permissions'; however, the 'Administration' tab on my vCenter Server Management VM does not look anything like the screenshots shown there.
So apparently I have a 'vCenter Server Management' appliance and not what is referred to as the 'vSphere Client'.
So with this context set, I have some questions:
Is the error above due to my trial license?
How is the 'vcenter server management (vcsa)' appliance different from the 'vsphere client'?
Is it possible to change 'global permissions' on the vcsa or do I need to get the 'vsphere client' to do that?
I tried adding the default port (443) as mentioned here, to no avail. Keen to hear from you soon.
I'm having trouble authenticating and writing data to a Spanner database locally. All imports are up to date (google.cloud, google.oauth2, etc.). I have had someone else run this and it works fine for them, so the problem seems to be something on my end: something wrong or misconfigured on my computer, maybe where the credentials are stored.
Anyone have any ideas?
from google.cloud import spanner
from google.api_core.exceptions import GoogleAPICallError
from google.api_core.datetime_helpers import DatetimeWithNanoseconds
import datetime
from google.oauth2 import service_account


def write_to(database):
    record = [[
        1041613562310836275,
        'test_name'
    ]]
    columns = ("id", "name")
    insert_errors = []
    try:
        with database.batch() as batch:
            batch.insert_or_update(
                table="guild",
                columns=columns,
                values=record,
            )
    except GoogleAPICallError as e:
        print(f'error: {e}')
        insert_errors.append(e.message)
    return insert_errors


if __name__ == "__main__":
    credentials = service_account.Credentials.from_service_account_file(r'path\to\a.json')
    instance_id = 'instance-name'
    database_id = 'database-name'
    spanner_client = spanner.Client(project='project-name', credentials=credentials)
    print(f'spanner creds: {spanner_client.credentials}')
    instance = spanner_client.instance(instance_id)
    database = instance.database(database_id)
    insert_errors = write_to(database)
some credential tests:
creds = service_account.Credentials.from_service_account_file(a_json)
<google.oauth2.service_account.Credentials at 0x...>
spanner_client.credentials
<google.auth.credentials.AnonymousCredentials at 0x...>
spanner_client.credentials.signer_email
AttributeError: 'AnonymousCredentials' object has no attribute 'signer_email'
creds.signer_email
'...#....iam.gserviceaccount.com'
spanner.Client().from_service_account_json(a_json).credentials
<google.auth.credentials.AnonymousCredentials object at 0x...>
The most common reason for this is that you have accidentally set (or forgotten to unset) the environment variable SPANNER_EMULATOR_HOST. If this environment variable has been set, the client library will try to connect to the emulator instead of Cloud Spanner. This will cause the client library to wait for a long time while trying to connect to the emulator (assuming the emulator is not running on your machine). Unset the environment variable to fix this problem.
Note: This environment variable only affects Cloud Spanner client libraries, which is why other Google Cloud products will work on the same machine. The script will also in most cases work on other machines, as they are unlikely to have this environment variable set.
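A quick way to verify this from Python (a sketch; 'project-name' is the placeholder from the question):

import os

# if this prints a host:port value, the client will target the emulator
# and silently fall back to AnonymousCredentials
print(os.environ.get('SPANNER_EMULATOR_HOST'))

# unset it for the current process before creating the client
os.environ.pop('SPANNER_EMULATOR_HOST', None)

from google.cloud import spanner
spanner_client = spanner.Client(project='project-name')
print(spanner_client.credentials)  # should no longer be AnonymousCredentials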
We are running Airflow via AWS's managed MWAA offering. As part of their offering they include a tutorial on securely using the SSH Operator in conjunction with AWS Secrets Manager. The gist of their solution is described below:
Run a Task that fetches the pem file from a Secrets Manager location and stores it on the filesystem at /tmp/mypem.pem.
In the SSH Connection, include the extra information that specifies the file location:
{"key_file":"/tmp/mypem.pem"}
Use the SSH Connection in the SSHOperator.
In short the workflow is supposed to be:
Task1 gets the pem -> Task2 uses the pem via the SSHOperator
All of this is great in theory, but it doesn't actually work, because Task1 may run on a different node from Task2, which means Task2 can't access the /tmp/mypem.pem file that Task1 wrote. AWS is aware of this limitation according to AWS Support, but now we need to understand another way to do this.
Question
How can we securely store and access a pem file that can then be used by Tasks running on different nodes via the SSHOperator?
I ran into the same problem. I extended the SSHOperator to do both steps in one call.
In AWS Secrets Manager, two keys are added for Airflow to retrieve on execution:
{variables_prefix}/airflow-user-ssh-key : the value of the private key
{connections_prefix}/ssh_airflow_user : ssh://replace.user@replace.remote.host?key_file=%2Ftmp%2Fairflow-user-ssh-key
from typing import Optional, Sequence
from os.path import basename, splitext
from airflow.models import Variable
from airflow.providers.ssh.operators.ssh import SSHOperator
from airflow.providers.ssh.hooks.ssh import SSHHook


class SSHOperator(SSHOperator):
    """
    SSHOperator to execute commands on given remote host using the ssh_hook.

    :param ssh_conn_id: :ref:`ssh connection id<howto/connection:ssh>`
        from airflow Connections.
    :param ssh_key_var: name of Variable holding private key.
        Creates "/tmp/{variable_name}.pem" to use in SSH connection.
        May also be inferred from "key_file" in "extras" in "ssh_conn_id".
    :param remote_host: remote host to connect (templated)
        Nullable. If provided, it will replace the `remote_host` which was
        defined in `ssh_hook` or predefined in the connection of `ssh_conn_id`.
    :param command: command to execute on remote host. (templated)
    :param timeout: (deprecated) timeout (in seconds) for executing the command. The default is 10 seconds.
        Use conn_timeout and cmd_timeout parameters instead.
    :param environment: a dict of shell environment variables. Note that the
        server will reject them silently if `AcceptEnv` is not set in SSH config.
    :param get_pty: request a pseudo-terminal from the server. Set to ``True``
        to have the remote process killed upon task timeout.
        The default is ``False`` but note that `get_pty` is forced to ``True``
        when the `command` starts with ``sudo``.
    """

    template_fields: Sequence[str] = ("command", "remote_host")
    template_ext: Sequence[str] = (".sh",)
    template_fields_renderers = {"command": "bash"}

    def __init__(
        self,
        *,
        ssh_conn_id: Optional[str] = None,
        ssh_key_var: Optional[str] = None,
        remote_host: Optional[str] = None,
        command: Optional[str] = None,
        timeout: Optional[int] = None,
        environment: Optional[dict] = None,
        get_pty: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(
            ssh_conn_id=ssh_conn_id,
            remote_host=remote_host,
            command=command,
            timeout=timeout,
            environment=environment,
            get_pty=get_pty,
            **kwargs,
        )
        if ssh_key_var is None:
            key_file = SSHHook(ssh_conn_id=self.ssh_conn_id).key_file
            key_filename = basename(key_file)
            key_filename_no_extension = splitext(key_filename)[0]
            self.ssh_key_var = key_filename_no_extension
        else:
            self.ssh_key_var = ssh_key_var

    def import_ssh_key(self):
        with open(f"/tmp/{self.ssh_key_var}", "w") as file:
            file.write(Variable.get(self.ssh_key_var))

    def execute(self, context):
        self.import_ssh_key()
        super().execute(context)
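For illustration, a task using this operator might look like the following (a sketch; the connection id and variable name match the Secrets Manager keys above, while task_id and command are made up):

ssh_task = SSHOperator(
    task_id="run_remote_command",
    ssh_conn_id="ssh_airflow_user",
    # the private key is written to /tmp/airflow-user-ssh-key before connecting
    ssh_key_var="airflow-user-ssh-key",
    command="whoami",
)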
The answer by holly is good. I am sharing a different way I solved this problem: I converted the SSH Connection into a URI and put it into Secrets Manager under the expected connections path, and everything worked great via the SSH Operator. Below are the general steps I took.
Generate an encoded URI
import json
from airflow.models.connection import Connection
from pathlib import Path

pem = Path("/my/pem/file.pem").read_text()

myconn = Connection(
    conn_id="connX",
    conn_type="ssh",
    host="10.x.y.z",
    login="mylogin",
    extra=json.dumps(dict(private_key=pem)),
)
print(myconn.get_uri())
Input that URI under the environment's configured path in Secrets Manager. The important note here is to input the value in the plaintext field without including a key. Example:
airflow/connections/connX and under Plaintext only include the URI value
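For example, a sketch of storing it with boto3 (assuming the default airflow/connections prefix; uri is the string printed by get_uri() above):

import boto3

client = boto3.client("secretsmanager")
# store the raw URI string as the secret value, with no JSON key wrapping
client.create_secret(Name="airflow/connections/connX", SecretString=uri)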
Now in the SSHOperator you can reference this connection Id like any other.
remote_task = SSHOperator(
    task_id="ssh_and_execute_command",
    ssh_conn_id="connX",
    command="whoami",
)
How do I get all VM information for all projects in GCP?
I have multiple projects in my GCP account, and I need the operating system, the operating system version, and the operating system build version for all the VMs in every project.
I didn't find a tool for that, so I coded something you can use.
This code could be improved, but here you have a way to scan all projects and get information about the OS.
Let me know if it helps you.
Pip install:
!pip install google-cloud
!pip install google-api-python-client
!pip install oauth2client
Code:
import subprocess
import sys
import logging
import threading
import pprint

logger = logging.Logger('catch_all')


def execute_bash(parameters):
    try:
        return subprocess.check_output(parameters)
    except Exception as e:
        logger.error(e)
        logger.error('ERROR: Look in the Jupyter console for more information')


def scan_gce(project, results_scan):
    print('Scanning project: "{}"'.format(project))
    ex = execute_bash(['gcloud', 'compute', 'instances', 'list', '--project', project, '--format=value(name,zone,status)'])
    list_result_vms = []
    if ex:
        list_vms = ex.decode("utf-8").split('\n')
        for vm in list_vms:
            if vm:
                vm_info = vm.split('\t')
                print('Scanning Instance: "{}" in project "{}"'.format(vm_info[0], project))
                results_bytes = execute_bash(['gcloud', 'compute', '--project', project,
                                              'ssh', '--zone', vm_info[1], vm_info[0],
                                              '--command', 'cat /etc/*-release'])
                if results_bytes:
                    results = results_bytes.decode("utf-8").split('\n')
                    list_result_vms.append({'instance_name': vm_info[0], 'result': results})
    results_scan.append({'project': project, 'vms': list_result_vms})


list_projects = execute_bash(['gcloud', 'projects', 'list', '--format=value(projectId)']).decode("utf-8").split('\n')
threads_project = []
results_scan = []
for project in list_projects:
    if project:  # skip the empty string produced by the trailing newline
        t = threading.Thread(target=scan_gce, args=(project, results_scan))
        threads_project.append(t)
        t.start()
for t in threads_project:
    t.join()
for result in results_scan:
    pprint.pprint(result)
Quick and dirty:
gcloud projects list --format 'value(PROJECT_ID)' >> proj_list
cat proj_list | while read pj; do gcloud compute instances list --project $pj; done
You can use the following command in the Cloud Shell to fetch all projects and then show the instances for each of them:
for i in $(gcloud projects list | sed 1d | cut -f1 -d$' '); do
gcloud compute instances list --project $i;done;
Note: make sure you have the compute.instances.list permission for all of the projects.
Here is how you do it using google-api-python-client (pip3 install -U google-api-python-client) without using bash. Note: this is meant to be run with keyless auth; using service account keys is bad practice.
https://github.com/googleapis/google-api-python-client/blob/main/docs/start.md
https://github.com/googleapis/google-api-python-client/blob/main/docs/dyn/index.md
https://googleapis.github.io/google-api-python-client/docs/dyn/compute_v1.html
from googleapiclient import discovery
from googleapiclient.errors import HttpError
import yaml
import structlog

logger = structlog.stdlib.get_logger()


def get_projects() -> list:
    projects: list = []
    service = discovery.build('cloudresourcemanager', 'v1', cache_discovery=False)
    request = service.projects().list()
    response = request.execute()
    for project in response.get('projects'):
        projects.append(project.get("projectId"))
    logger.debug('got projects', projects=projects)
    return projects


def get_zones(project: str) -> list:
    zones: list = []
    service = discovery.build('compute', 'v1', cache_discovery=False)
    request = service.zones().list(project=project)
    while request is not None:
        response = request.execute()
        if 'items' not in response:
            logger.warning('no zones found')
            return []
        for zone in response.get('items'):
            zones.append(zone.get('name'))
        request = service.zones().list_next(previous_request=request, previous_response=response)
    logger.debug('got zones', zones=zones)
    return zones


def get_vms() -> list:
    vms: list = []
    projects: list = get_projects()
    service = discovery.build('compute', 'v1', cache_discovery=False)
    for project in projects:
        try:
            zones: list = get_zones(project)
            for zone in zones:
                request = service.instances().list(project=project, zone=zone)
                response = request.execute()
                if 'items' in response:
                    for vm in response.get('items'):
                        ips: list = []
                        for interface in vm.get('networkInterfaces'):
                            ips.append(interface.get('networkIP'))
                        vms.append({vm.get('name'): {'self_link': vm.get('selfLink'), 'ips': ips}})
        except HttpError:
            pass
    logger.debug('got vms', vms=vms)
    return vms


if __name__ == '__main__':
    data = get_vms()
    with open('output.yaml', 'w') as fh:
        yaml.dump(data, fh)
What are the parallel pyVmomi APIs for the following esxcli commands?
esxcli system settings advanced list --option /DataMover/HardwareAcceleratedMove
esxcli system settings advanced list --option /DataMover/HardwareAcceleratedInit
esxcli system settings advanced list --option /VMFS3/HardwareAcceleratedLocking
esxcli system settings advanced list --option /VMFS3/EnableBlockDelete
esxcli storage nmp device list
I would like to get this info for all ESXi hosts that reside in a specific datacenter.
Thanks,
import atexit

from pyVim import connect
from pyVmomi import vmodl
from pyVmomi import vim

import tools.cli as cli


def print_host_info(host_machine):
    """
    Print information for a particular host machine
    """
    print(host_machine.config.network.dnsConfig.hostName)
    print(host_machine.config.product.version)
    for option in host_machine.config.option:
        if option.key in ('VMFS3.UseATSForHBOnVMFS5',
                          'DataMover.HardwareAcceleratedInit',
                          'DataMover.HardwareAcceleratedMove',
                          'VMFS3.HardwareAcceleratedLocking',
                          'VMFS3.EnableBlockDelete'):
            print(option.key, option.value)


def main():
    """
    Simple command-line program for listing the host machines on a system.
    """
    args = cli.get_args()
    try:
        service_instance = connect.SmartConnect(host=args.host,
                                                user=args.user,
                                                pwd=args.password,
                                                port=int(args.port))
        atexit.register(connect.Disconnect, service_instance)
        content = service_instance.RetrieveContent()
        container = content.rootFolder  # starting point to look into
        viewType = [vim.HostSystem]  # object types to look for
        recursive = True  # whether we should look into it recursively
        containerView = content.viewManager.CreateContainerView(
            container, viewType, recursive)
        children = containerView.view
        for child in children:
            print_host_info(child)
    except vmodl.MethodFault as error:
        print("Caught vmodl fault : " + error.msg)
        return -1
    return 0


# Start program
if __name__ == "__main__":
    main()
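The advanced settings above don't cover esxcli storage nmp device list. A rough parallel, as far as I know, is the multipath info under the host's storage device config; here is a sketch (attribute names are my best reading of the vSphere API, so verify them before relying on this):

def print_nmp_devices(host_machine):
    # roughly parallels `esxcli storage nmp device list`
    storage = host_machine.config.storageDevice
    for lun in storage.multipathInfo.lun:
        # lun.policy holds the path selection policy, e.g. VMW_PSP_RR
        print(lun.id, lun.policy.policy if lun.policy else None)
        for path in lun.path:
            print('  path:', path.name, path.pathState)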