#!/usr/bin/python3
# -*- python -*-
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: GPL-2.0-only
#
# Copyright (C) 2007-2015 Red Hat, Inc.
#
import os
import platform
import re
import time
from functools import reduce

from procfs.utilist import bitmasklist

VERSION = "0.7.3"


def is_s390():
    """ Return True if running on s390 or s390x """
    machine = platform.machine()
    return bool(re.search('s390', machine))


def process_cmdline(pid_info):
    """
    Returns the process command line, if available in the given `process'
    class; if not available, falls back to using the comm (short process
    name) in its pidstat key.
    """
    if pid_info["cmdline"]:
        return reduce(lambda a, b: a + " %s" % b, pid_info["cmdline"]).strip()

    try:
        # If a pid disappears before we query it, return None
        return pid_info["stat"]["comm"]
    except:
        return None


class pidstat:
    """
    Provides a dictionary to access the fields in the per process
    /proc/PID/stat files.

    One can obtain the available fields by asking for the keys of the
    dictionary, e.g.:

    >>> p = procfs.pidstat(1)
    >>> print(p.keys())
    ['majflt', 'rss', 'cnswap', 'cstime', 'pid', 'session', 'startstack',
     'startcode', 'cmajflt', 'blocked', 'exit_signal', 'minflt', 'nswap',
     'environ', 'priority', 'state', 'delayacct_blkio_ticks', 'policy',
     'rt_priority', 'ppid', 'nice', 'cutime', 'endcode', 'wchan',
     'num_threads', 'sigcatch', 'comm', 'stime', 'sigignore', 'tty_nr',
     'kstkeip', 'utime', 'tpgid', 'itrealvalue', 'kstkesp', 'rlim',
     'signal', 'pgrp', 'flags', 'starttime', 'cminflt', 'vsize',
     'processor']

    And then access the various process properties using it as a dictionary:

    >>> print(p['comm'])
    systemd
    >>> print(p['priority'])
    20
    >>> print(p['state'])
    S

    Please refer to the procfs(5) man page ('man 5 procfs', part of the
    'man-pages' RPM package) for a description of each of the above fields.
    """

    # Entries with the same value: the one with a comment after it is the
    # more recent, having replaced the other name in v4.1-rc kernel times.
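
    # The PF_* constants below mirror the kernel's per-task flag bits from
    # include/linux/sched.h; they are tested against the 'flags' stat field.
    # Illustrative sketch (assumes 'p' is a pidstat instance for a live pid):
    #
    #   if p["flags"] & pidstat.PF_NO_SETAFFINITY:
    #       ...  # the kernel won't let userspace change this task's affinity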
    PF_ALIGNWARN = 0x00000001
    PF_STARTING = 0x00000002
    PF_EXITING = 0x00000004
    PF_EXITPIDONE = 0x00000008
    PF_VCPU = 0x00000010
    PF_WQ_WORKER = 0x00000020  # /* I'm a workqueue worker */
    PF_FORKNOEXEC = 0x00000040
    PF_MCE_PROCESS = 0x00000080  # /* process policy on mce errors */
    PF_SUPERPRIV = 0x00000100
    PF_DUMPCORE = 0x00000200
    PF_SIGNALED = 0x00000400
    PF_MEMALLOC = 0x00000800
    # /* set_user noticed that RLIMIT_NPROC was exceeded */
    PF_NPROC_EXCEEDED = 0x00001000
    PF_FLUSHER = 0x00001000
    PF_USED_MATH = 0x00002000
    PF_USED_ASYNC = 0x00004000  # /* used async_schedule*(), used by module init */
    PF_NOFREEZE = 0x00008000
    PF_FROZEN = 0x00010000
    PF_FSTRANS = 0x00020000
    PF_KSWAPD = 0x00040000
    PF_MEMALLOC_NOIO = 0x00080000  # /* Allocating memory without IO involved */
    PF_SWAPOFF = 0x00080000
    PF_LESS_THROTTLE = 0x00100000
    PF_KTHREAD = 0x00200000
    PF_RANDOMIZE = 0x00400000
    PF_SWAPWRITE = 0x00800000
    PF_SPREAD_PAGE = 0x01000000
    PF_SPREAD_SLAB = 0x02000000
    PF_THREAD_BOUND = 0x04000000
    # /* Userland is not allowed to meddle with cpus_allowed */
    PF_NO_SETAFFINITY = 0x04000000
    PF_MCE_EARLY = 0x08000000  # /* Early kill for mce process policy */
    PF_MEMPOLICY = 0x10000000
    PF_MUTEX_TESTER = 0x20000000
    PF_FREEZER_SKIP = 0x40000000
    PF_FREEZER_NOSIG = 0x80000000
    # /* this thread called freeze_processes and should not be frozen */
    PF_SUSPEND_TASK = 0x80000000

    proc_stat_fields = ["pid", "comm", "state", "ppid", "pgrp", "session",
                        "tty_nr", "tpgid", "flags", "minflt", "cminflt",
                        "majflt", "cmajflt", "utime", "stime", "cutime",
                        "cstime", "priority", "nice", "num_threads",
                        "itrealvalue", "starttime", "vsize", "rss", "rlim",
                        "startcode", "endcode", "startstack", "kstkesp",
                        "kstkeip", "signal", "blocked", "sigignore",
                        "sigcatch", "wchan", "nswap", "cnswap",
                        "exit_signal", "processor", "rt_priority", "policy",
                        "delayacct_blkio_ticks", "environ"]

    def __init__(self, pid, basedir="/proc"):
        self.pid = pid
        try:
            self.load(basedir)
        except FileNotFoundError:
            # The file representing the pid has disappeared,
            # propagate the error to the user to handle
            raise

    def __getitem__(self, fieldname):
        return self.fields[fieldname]

    def keys(self):
        return list(self.fields.keys())

    def values(self):
        return list(self.fields.values())

    def has_key(self, fieldname):
        return fieldname in self.fields

    def items(self):
        return self.fields

    def __contains__(self, fieldname):
        return fieldname in self.fields

    def load(self, basedir="/proc"):
        try:
            f = open(f"{basedir}/{self.pid}/stat")
        except FileNotFoundError:
            # The pid has disappeared, propagate the error
            raise
        fields = f.readline().strip().split(') ')
        f.close()
        fields = fields[0].split(' (') + fields[1].split()
        self.fields = {}
        nr_fields = min(len(fields), len(self.proc_stat_fields))
        for i in range(nr_fields):
            attrname = self.proc_stat_fields[i]
            value = fields[i]
            if attrname == "comm":
                self.fields["comm"] = value.strip('()')
            else:
                try:
                    self.fields[attrname] = int(value)
                except:
                    self.fields[attrname] = value

    def is_bound_to_cpu(self):
        """
        Returns true if this process has a fixed smp affinity mask,
        not allowing it to be moved to a different set of CPUs.
        """
        return bool(self.fields["flags"] & self.PF_THREAD_BOUND)
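
    # Minimal usage sketch (assumes a live /proc and that pid 2 exists, which
    # is kthreadd on most Linux systems; the exact flags depend on the kernel):
    #
    #   p = pidstat(2)
    #   print(p.process_flags())   # e.g. ['PF_KTHREAD', 'PF_NO_SETAFFINITY']
    #   print(p.is_bound_to_cpu())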

    def process_flags(self):
        """
        Returns a list with all the process flags known; details depend on
        the kernel version, as declared in the file include/linux/sched.h
        in the kernel sources.

        As of v4.2-rc7 these include (from include/linux/sched.h comments):

        PF_EXITING         Getting shut down
        PF_EXITPIDONE      PI exit done on shut down
        PF_VCPU            I'm a virtual CPU
        PF_WQ_WORKER       I'm a workqueue worker
        PF_FORKNOEXEC      Forked but didn't exec
        PF_MCE_PROCESS     Process policy on mce errors
        PF_SUPERPRIV       Used super-user privileges
        PF_DUMPCORE        Dumped core
        PF_SIGNALED        Killed by a signal
        PF_MEMALLOC        Allocating memory
        PF_NPROC_EXCEEDED  set_user noticed that RLIMIT_NPROC was exceeded
        PF_USED_MATH       If unset the fpu must be initialized before use
        PF_USED_ASYNC      Used async_schedule*(), used by module init
        PF_NOFREEZE        This thread should not be frozen
        PF_FROZEN          Frozen for system suspend
        PF_FSTRANS         Inside a filesystem transaction
        PF_KSWAPD          I am kswapd
        PF_MEMALLOC_NOIO   Allocating memory without IO involved
        PF_LESS_THROTTLE   Throttle me less: I clean memory
        PF_KTHREAD         I am a kernel thread
        PF_RANDOMIZE       Randomize virtual address space
        PF_SWAPWRITE       Allowed to write to swap
        PF_NO_SETAFFINITY  Userland is not allowed to meddle with cpus_allowed
        PF_MCE_EARLY       Early kill for mce process policy
        PF_MUTEX_TESTER    Thread belongs to the rt mutex tester
        PF_FREEZER_SKIP    Freezer should not count it as freezable
        PF_SUSPEND_TASK    This thread called freeze_processes and should not be frozen
        """
        sflags = []
        for attr in dir(self):
            if attr[:3] != "PF_":
                continue
            value = getattr(self, attr)
            if value & self.fields["flags"]:
                sflags.append(attr)

        return sflags

    def cannot_set_affinity(self, pid):
        PF_NO_SETAFFINITY = 0x04000000
        try:
            return bool(int(self.processes[pid]["stat"]["flags"]) &
                        PF_NO_SETAFFINITY)
        except:
            return True

    def cannot_set_thread_affinity(self, pid, tid):
        PF_NO_SETAFFINITY = 0x04000000
        try:
            return bool(int(self.processes[pid].threads[tid]["stat"]["flags"]) &
                        PF_NO_SETAFFINITY)
        except:
            return True


class pidstatus:
    """
    Provides a dictionary to access the fields in the per process
    /proc/PID/status files.

    This provides additional information about processes and threads beyond
    what can be obtained with the procfs.pidstat() class.

    One can obtain the available fields by asking for the keys of the
    dictionary, e.g.:

    >>> import procfs
    >>> p = procfs.pidstatus(1)
    >>> print(p.keys())
    ['VmExe', 'CapBnd', 'NSpgid', 'Tgid', 'NSpid', 'VmSize', 'VmPMD',
     'ShdPnd', 'State', 'Gid', 'nonvoluntary_ctxt_switches', 'SigIgn',
     'VmStk', 'VmData', 'SigCgt', 'CapEff', 'VmPTE', 'Groups', 'NStgid',
     'Threads', 'PPid', 'VmHWM', 'NSsid', 'VmSwap', 'Name', 'SigBlk',
     'Mems_allowed_list', 'VmPeak', 'Ngid', 'VmLck', 'SigQ', 'VmPin',
     'Mems_allowed', 'CapPrm', 'Seccomp', 'VmLib', 'Cpus_allowed', 'Uid',
     'SigPnd', 'Pid', 'Cpus_allowed_list', 'TracerPid', 'CapInh',
     'voluntary_ctxt_switches', 'VmRSS', 'FDSize']
    >>> print(p["Pid"])
    1
    >>> print(p["Threads"])
    1
    >>> print(p["VmExe"])
    1248 kB
    >>> print(p["Cpus_allowed"])
    f
    >>> print(p["SigQ"])
    0/30698
    >>> print(p["VmPeak"])
    320300 kB
    >>>

    Please refer to the procfs(5) man page ('man 5 procfs', part of the
    'man-pages' RPM package) for a description of each of the above fields.
    The man page also has references to further documentation, such as the
    getrlimit(2) man page, when explaining fields like "SigQ".
    """
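
    # Parsing note (illustrative, assumes a live /proc): values that are not
    # plain integers are kept as strings, so multi-column fields need further
    # splitting by the caller, e.g.:
    #
    #   real_uid = int(pidstatus(1)["Uid"].split()[0])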
""" def __init__(self, pid, basedir="/proc"): self.pid = pid self.load(basedir) def __getitem__(self, fieldname): return self.fields[fieldname] def keys(self): return list(self.fields.keys()) def values(self): return list(self.fields.values()) def has_key(self, fieldname): return fieldname in self.fields def items(self): return self.fields def __contains__(self, fieldname): return fieldname in self.fields def load(self, basedir="/proc"): self.fields = {} with open(f"{basedir}/{self.pid}/status") as f: for line in f.readlines(): fields = line.split(":") if len(fields) != 2: continue name = fields[0] value = fields[1].strip() try: self.fields[name] = int(value) except: self.fields[name] = value class process: """ Information about a process with a given pid, provides a dictionary with two entries, instances of different wrappers for /proc/ process related meta files: "stat" and "status", see the documentation for procfs.pidstat and procfs.pidstatus for further info about those classes. """ def __init__(self, pid, basedir="/proc"): self.pid = pid self.basedir = basedir def __getitem__(self, attr): if not hasattr(self, attr): if attr in ("stat", "status"): if attr == "stat": sclass = pidstat else: sclass = pidstatus try: setattr(self, attr, sclass(self.pid, self.basedir)) except FileNotFoundError: # The pid has disappeared, progate the error raise elif attr == "cmdline": self.load_cmdline() elif attr == "threads": self.load_threads() elif attr == "cgroups": self.load_cgroups() elif attr == "environ": self.load_environ() return getattr(self, attr) def has_key(self, attr): return hasattr(self, attr) def __contains__(self, attr): return hasattr(self, attr) def load_cmdline(self): try: with open(f"/proc/{self.pid}/cmdline") as f: self.cmdline = f.readline().strip().split('\0')[:-1] except FileNotFoundError: """ This can happen when a pid disappears """ self.cmdline = None except UnicodeDecodeError: """ TODO - this shouldn't happen, needs to be investigated """ self.cmdline = None def load_threads(self): self.threads = pidstats(f"/proc/{self.pid}/task/") # remove thread leader del self.threads[self.pid] def load_cgroups(self): self.cgroups = "" with open(f"/proc/{self.pid}/cgroup") as f: for line in reversed(f.readlines()): if len(self.cgroups) != 0: self.cgroups = self.cgroups + "," + line[:-1] else: self.cgroups = line[:-1] def load_environ(self): """ Loads the environment variables for this process. The entries then become available via the 'environ' member, or via the 'environ' dict key when accessing as p["environ"]. E.g.: >>> all_processes = procfs.pidstats() >>> firefox_pid = all_processes.find_by_name("firefox") >>> firefox_process = all_processes[firefox_pid[0]] >>> print firefox_process["environ"]["PWD"] /home/acme >>> print len(firefox_process.environ.keys()) 66 >>> print firefox_process["environ"]["SHELL"] /bin/bash >>> print firefox_process["environ"]["USERNAME"] acme >>> print firefox_process["environ"]["HOME"] /home/acme >>> print firefox_process["environ"]["MAIL"] /var/spool/mail/acme >>> """ self.environ = {} with open(f"/proc/{self.pid}/environ") as f: for x in f.readline().split('\0'): if len(x) > 0: y = x.split('=') self.environ[y[0]] = y[1] class pidstats: """ Provides access to all the processes in the system, to get a picture of how many processes there are at any given moment. The entries can be accessed as a dictionary, keyed by pid. Also there are methods to find processes that match a given COMM or regular expression. 
""" def __init__(self, basedir="/proc"): self.basedir = basedir self.processes = {} self.reload() def __getitem__(self, key): return self.processes[key] def __delitem__(self, key): # not clear on why this can fail, but it can try: del self.processes[key] except: pass def keys(self): return list(self.processes.keys()) def values(self): return list(self.processes.values()) def has_key(self, key): return key in self.processes def items(self): return self.processes def __contains__(self, key): return key in self.processes def reload(self): """ This operation will throw away the current dictionary contents, if any, and read all the pid files from /proc/, instantiating a 'process' instance for each of them. This is a high overhead operation, and should be avoided if the perf python binding can be used to detect when new threads appear and existing ones terminate. In RHEL it is found in the python-perf rpm package. More information about the perf facilities can be found in the 'perf_event_open' man page. """ del self.processes self.processes = {} pids = os.listdir(self.basedir) for spid in pids: try: pid = int(spid) except: continue self.processes[pid] = process(pid, self.basedir) def reload_threads(self): to_remove = [] for pid in list(self.processes.keys()): try: self.processes[pid].load_threads() except OSError: # process vanished, remove it to_remove.append(pid) for pid in to_remove: del self.processes[pid] def find_by_name(self, name): name = name[:15] pids = [] for pid in list(self.processes.keys()): try: if name == self.processes[pid]["stat"]["comm"]: pids.append(pid) except IOError: # We're doing lazy loading of /proc files # So if we get this exception is because the # process vanished, remove it del self.processes[pid] return pids def find_by_regex(self, regex): pids = [] for pid in list(self.processes.keys()): try: if regex.match(self.processes[pid]["stat"]["comm"]): pids.append(pid) except IOError: # We're doing lazy loading of /proc files # So if we get this exception is because the # process vanished, remove it del self.processes[pid] return pids def find_by_cmdline_regex(self, regex): pids = [] for pid in list(self.processes.keys()): try: if regex.match(process_cmdline(self.processes[pid])): pids.append(pid) except IOError: # We're doing lazy loading of /proc files # So if we get this exception is because the # process vanished, remove it del self.processes[pid] return pids def get_per_cpu_rtprios(self, basename): cpu = 0 priorities = "" processed_pids = [] while True: name = f"{basename}/{cpu}" pids = self.find_by_name(name) if not pids or len([n for n in pids if n not in processed_pids]) == 0: break for pid in pids: try: priorities += f'{self.processes[pid]["stat"]["rt_priority"]}' except IOError: # We're doing lazy loading of /proc files # So if we get this exception is because the # process vanished, remove it del self.processes[pid] processed_pids += pids cpu += 1 priorities = priorities.strip(',') return priorities def get_rtprios(self, name): cpu = 0 priorities = "" processed_pids = [] while True: pids = self.find_by_name(name) if not pids or len([n for n in pids if n not in processed_pids]) == 0: break for pid in pids: try: priorities += f'{self.processes[pid]["stat"]["rt_priority"]}' except IOError: # We're doing lazy loading of /proc files # So if we get this exception is because the # process vanished, remove it del self.processes[pid] processed_pids += pids cpu += 1 priorities = priorities.strip(',') return priorities def is_bound_to_cpu(self, pid): """ Checks if a 

    def is_bound_to_cpu(self, pid):
        """
        Checks if a given pid can't have its SMP affinity mask changed.
        """
        return self.processes[pid]["stat"].is_bound_to_cpu()


class interrupts:
    """
    Information about IRQs in the system. A dictionary keyed by IRQ number
    will have as its value another dictionary with "cpu", "type" and "users"
    keys, with the SMP affinity mask, type of IRQ and the drivers associated
    with each interrupt.

    The information comes from the /proc/interrupts file, documented in
    'man procfs(5)'. For instance, the 'cpu' entry is an array with one
    value per CPU present in the system, each being the number of interrupts
    that took place on that CPU.

    E.g.:

    >>> import procfs
    >>> interrupts = procfs.interrupts()
    >>> thunderbolt_irq = interrupts.find_by_user("thunderbolt")
    >>> print(thunderbolt_irq)
    34
    >>> thunderbolt = interrupts[thunderbolt_irq]
    >>> print(thunderbolt)
    {'affinity': [0, 1, 2, 3], 'type': 'PCI-MSI', 'cpu': [3495, 0, 81, 0], 'users': ['thunderbolt']}
    >>>
    """

    def __init__(self):
        self.interrupts = {}
        self.reload()

    def __getitem__(self, key):
        return self.interrupts[str(key)]

    def keys(self):
        return list(self.interrupts.keys())

    def values(self):
        return list(self.interrupts.values())

    def has_key(self, key):
        return str(key) in self.interrupts

    def items(self):
        return self.interrupts

    def __contains__(self, key):
        return str(key) in self.interrupts

    def reload(self):
        del self.interrupts
        self.interrupts = {}

        with open("/proc/interrupts") as f:
            for line in f.readlines():
                line = line.strip()
                fields = line.split()
                if fields[0][:3] == "CPU":
                    self.nr_cpus = len(fields)
                    continue
                irq = fields[0].strip(":")
                self.interrupts[irq] = {}
                self.interrupts[irq] = self.parse_entry(fields[1:], line)
                try:
                    nirq = int(irq)
                except:
                    continue
                self.interrupts[irq]["affinity"] = self.parse_affinity(nirq)

    def parse_entry(self, fields, line):
        entry = {}
        entry["cpu"] = []
        entry["cpu"].append(int(fields[0]))
        nr_fields = len(fields)
        if nr_fields >= self.nr_cpus:
            entry["cpu"] += [int(i) for i in fields[1:self.nr_cpus]]
            if nr_fields > self.nr_cpus:
                entry["type"] = fields[self.nr_cpus]
                # look if there are users (interrupts 3 and 4 haven't)
                if nr_fields > self.nr_cpus + 1:
                    entry["users"] = [a.strip() for a in
                                      fields[nr_fields - 1].split(',')]
                else:
                    entry["users"] = []
        return entry

    def parse_affinity(self, irq):
        try:
            with open(f"/proc/irq/{irq}/smp_affinity") as f:
                line = f.readline()
                return bitmasklist(line, self.nr_cpus)
        except IOError:
            return [0, ]

    def find_by_user(self, user):
        """
        Looks up an interrupt number by the name of one of its users.

        E.g.:

        >>> import procfs
        >>> interrupts = procfs.interrupts()
        >>> thunderbolt_irq = interrupts.find_by_user("thunderbolt")
        >>> print(thunderbolt_irq)
        34
        >>> thunderbolt = interrupts[thunderbolt_irq]
        >>> print(thunderbolt)
        {'affinity': [0, 1, 2, 3], 'type': 'PCI-MSI', 'cpu': [3495, 0, 81, 0], 'users': ['thunderbolt']}
        >>>
        """
        for i in list(self.interrupts.keys()):
            if "users" in self.interrupts[i] and \
               user in self.interrupts[i]["users"]:
                return i
        return None

    def find_by_user_regex(self, regex):
        """
        Looks up interrupt numbers by a regex that matches the names of
        their users.

        E.g.:

        >>> import procfs
        >>> import re
        >>> interrupts = procfs.interrupts()
        >>> usb_controllers = interrupts.find_by_user_regex(re.compile(".*hcd"))
        >>> print(usb_controllers)
        ['22', '23', '31']
        >>> print([interrupts[irq]["users"] for irq in usb_controllers])
        [['ehci_hcd:usb4'], ['ehci_hcd:usb3'], ['xhci_hcd']]
        >>>
        """
        irqs = []
        for i in list(self.interrupts.keys()):
            if "users" not in self.interrupts[i]:
                continue
            for user in self.interrupts[i]["users"]:
                if regex.match(user):
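                    # one matching user is enough: record the IRQ number and
                    # move on to the next /proc/interrupts entry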
                    irqs.append(i)
                    break

        return irqs


class cmdline:
    """
    Parses the kernel command line (/proc/cmdline), turning it into a
    dictionary.

    Useful to figure out if some kernel boolean knob has been turned on, as
    well as to find the value associated with other kernel knobs.

    It can also be used to find out about parameters passed to the init
    process, such as 'BOOT_IMAGE', etc.

    E.g.:

    >>> import procfs
    >>> kcmd = procfs.cmdline()
    >>> print(kcmd.keys())
    ['LANG', 'BOOT_IMAGE', 'quiet', 'rhgb', 'rd.lvm.lv', 'ro', 'root']
    >>> print(kcmd["BOOT_IMAGE"])
    /vmlinuz-4.3.0-rc1+
    >>>
    """

    def __init__(self):
        self.options = {}
        self.parse()

    def parse(self):
        with open("/proc/cmdline") as f:
            for option in f.readline().strip().split():
                fields = option.split("=")
                if len(fields) == 1:
                    self.options[fields[0]] = True
                else:
                    self.options[fields[0]] = fields[1]

    def __getitem__(self, key):
        return self.options[key]

    def keys(self):
        return list(self.options.keys())

    def values(self):
        return list(self.options.values())

    def items(self):
        return self.options


class cpuinfo:
    """
    Dictionary with information about CPUs in the system.

    Please refer to 'man procfs(5)' for further information about the
    '/proc/cpuinfo' file, that is the source of the information provided by
    this class. The lscpu(1) man page also describes a program that uses
    the '/proc/cpuinfo' file.

    Using this class one can obtain the number of CPUs in a system:

    >>> cpus = procfs.cpuinfo()
    >>> print(cpus.nr_cpus)
    4

    It is also possible to figure out aspects of the CPU topology, such as
    how many physical CPU sockets exist, i.e. groups of CPUs sharing
    components such as CPU memory caches:

    >>> print(len(cpus.sockets))
    1

    Additionally, a dictionary with information common to all CPUs in the
    system is available:

    >>> print(cpus["model name"])
    Intel(R) Core(TM) i7-3667U CPU @ 2.00GHz
    >>> print(cpus["cache size"])
    4096 KB
    >>>
    """

    def __init__(self, filename="/proc/cpuinfo"):
        self.tags = {}
        self.nr_cpus = 0
        self.sockets = []
        self.parse(filename)

    def __getitem__(self, key):
        return self.tags[key.lower()]

    def keys(self):
        return list(self.tags.keys())

    def values(self):
        return list(self.tags.values())

    def items(self):
        return self.tags

    def parse(self, filename):
        with open(filename) as f:
            for line in f.readlines():
                line = line.strip()
                if not line:
                    continue
                fields = line.split(":")
                tagname = fields[0].strip().lower()
                if tagname == "processor":
                    self.nr_cpus += 1
                    continue
                if is_s390() and tagname == "cpu number":
                    self.nr_cpus += 1
                    continue
                if tagname == "core id":
                    continue
                self.tags[tagname] = fields[1].strip()
                if tagname == "physical id":
                    socket_id = self.tags[tagname]
                    if socket_id not in self.sockets:
                        self.sockets.append(socket_id)

        self.nr_sockets = self.sockets and len(self.sockets) or \
            (self.nr_cpus //
             ("siblings" in self.tags and int(self.tags["siblings"]) or 1))
        self.nr_cores = ("cpu cores" in self.tags and
                         int(self.tags["cpu cores"]) or 1) * self.nr_sockets


class smaps_lib:
    """
    Representation of an mmap in place for a process. Can be used to figure
    out which processes have a library mapped, etc.

    The 'perms' member can be used to figure out executable mmaps, i.e.
    libraries.

    The 'vm_start' and 'vm_end' members in turn can be used when trying to
    resolve processor instruction pointer addresses to a symbol name in a
    library.
    """
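
    # The header line handed to __init__() as lines[0] looks like, e.g.
    # (address range, perms, offset, dev, inode and an optional name):
    #
    #   7f15a9f3c000-7f15aa0f0000 r-xp 00000000 fd:01 67334  /usr/lib64/libc-2.20.so
    #
    # The remaining lines are "Tag: value" pairs such as "Size: 1748 kB",
    # which end up in the 'tags' dictionary, keyed by the lowercased tag.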
""" def __init__(self, lines): fields = lines[0].split() self.vm_start, self.vm_end = [int(a, 16) for a in fields[0].split("-")] self.perms = fields[1] self.offset = int(fields[2], 16) self.major, self.minor = fields[3].split(":") self.inode = int(fields[4]) if len(fields) > 5: self.name = fields[5] else: self.name = None self.tags = {} for line in lines[1:]: fields = line.split() tag = fields[0][:-1].lower() try: self.tags[tag] = int(fields[1]) except: # VmFlags are strings self.tags[tag] = fields def __getitem__(self, key): return self.tags[key.lower()] def keys(self): return list(self.tags.keys()) def values(self): return list(self.tags.values()) def items(self): return self.tags class smaps: """ List of libraries mapped by a process. Parses the lines in the /proc/PID/smaps file, that is further documented in the procfs(5) man page. Example: Listing the executable maps for the 'sshd' process: >>> import procfs >>> processes = procfs.pidstats() >>> sshd = processes.find_by_name("sshd") >>> sshd_maps = procfs.smaps(sshd[0]) >>> for i in range(len(sshd_maps)): ... if 'x' in sshd_maps[i].perms: ... print "%s: %s" % (sshd_maps[i].name, sshd_maps[i].perms) ... /usr/sbin/sshd: r-xp /usr/lib64/libnss_files-2.20.so: r-xp /usr/lib64/librt-2.20.so: r-xp /usr/lib64/libkeyutils.so.1.5: r-xp /usr/lib64/libkrb5support.so.0.1: r-xp /usr/lib64/libfreebl3.so: r-xp /usr/lib64/libpthread-2.20.so: r-xp ... """ def __init__(self, pid): self.pid = pid self.entries = [] self.reload() def parse_entry(self, f, line): lines = [] if not line: line = f.readline().strip() if not line: return lines.append(line) while True: line = f.readline() if not line: break line = line.strip() if line.split()[0][-1] == ':': lines.append(line) else: break self.entries.append(smaps_lib(lines)) return line def __len__(self): return len(self.entries) def __getitem__(self, index): return self.entries[index] def reload(self): line = None with open(f"/proc/{self.pid}/smaps") as f: while True: line = self.parse_entry(f, line) if not line: break self.nr_entries = len(self.entries) def find_by_name_fragment(self, fragment): result = [] for i in range(self.nr_entries): if self.entries[i].name and \ self.entries[i].name.find(fragment) >= 0: result.append(self.entries[i]) return result class cpustat: """ CPU statistics, obtained from a line in the '/proc/stat' file, Please refer to 'man procfs(5)' for further information about the '/proc/stat' file, that is the source of the information provided by this class. """ def __init__(self, fields): self.name = fields[0] (self.user, self.nice, self.system, self.idle, self.iowait, self.irq, self.softirq) = [int(i) for i in fields[1:8]] if len(fields) > 7: self.steal = int(fields[7]) if len(fields) > 8: self.guest = int(fields[8]) def __repr__(self): s = f"< user: {self.user}, nice: {self.nice}, system: {self.system}, idle: {self.idle}, iowait: {self.iowait}, irq: {self.irq}, softirq: {self.softirq}" if hasattr(self, 'steal'): s += f", steal: {self.steal}" if hasattr(self, 'guest'): s += f", guest: {self.guest}" return s + ">" class cpusstats: """ Dictionary with information about CPUs in the system. First entry in the dictionary gives an aggregate view of all CPUs, each other entry is about separate CPUs. Please refer to 'man procfs(5)' for further information about the '/proc/stat' file, that is the source of the information provided by this class. 
""" def __init__(self, filename="/proc/stat"): self.entries = {} self.time = None self.hertz = os.sysconf(2) self.filename = filename self.reload() def __iter__(self): return iter(self.entries) def __getitem__(self, key): return self.entries[key] def __len__(self): return len(list(self.entries.keys())) def keys(self): return list(self.entries.keys()) def values(self): return list(self.entries.values()) def items(self): return self.entries def reload(self): last_entries = self.entries self.entries = {} with open(self.filename) as f: for line in f.readlines(): fields = line.strip().split() if fields[0][:3].lower() != "cpu": continue c = cpustat(fields) if c.name == "cpu": idx = 0 else: idx = int(c.name[3:]) + 1 self.entries[idx] = c last_time = self.time self.time = time.time() if last_entries: delta_sec = self.time - last_time interval_hz = delta_sec * self.hertz for cpu in list(self.entries.keys()): if cpu not in last_entries: curr.usage = 0 continue curr = self.entries[cpu] prev = last_entries[cpu] delta = (curr.user - prev.user) + \ (curr.nice - prev.nice) + \ (curr.system - prev.system) curr.usage = (delta / interval_hz) * 100 curr.usage = min(curr.usage, 100) if __name__ == '__main__': import sys ints = interrupts() for i in list(ints.interrupts.keys()): print(f"{i}: {ints.interrupts[i]}") options = cmdline() for o in list(options.options.keys()): print(f"{o}: {options.options[o]}") cpu = cpuinfo() print(f"\ncpuinfo data: {cpu.nr_cpus} processors") for tag in list(cpu.keys()): print(f"{tag}={cpu[tag]}") print("smaps:\n" + ("-" * 40)) s = smaps(int(sys.argv[1])) for i in range(s.nr_entries): print(f"{s.entries[i].vm_start:#x} {s.entries[i].name}") print("-" * 40) for a in s.find_by_name_fragment(sys.argv[2]): print(a["Size"]) ps = pidstats() print(ps[1]) cs = cpusstats() while True: time.sleep(1) cs.reload() for cpu in cs: print(f"{cpu}: {cs[cpu]}") print("-" * 10)