Add complete guide and all config variants

This commit is contained in:
renato97
2026-02-05 14:06:25 +00:00
parent 239ee0e593
commit b40c76762c
1053 changed files with 167761 additions and 0 deletions

Binary file not shown.

View File

@@ -0,0 +1,4 @@
import glob
from os.path import basename, dirname, isfile

# Expose every sibling .py module (except this initializer) so that
# "from package import *" picks them all up.
modules = glob.glob(dirname(__file__) + "/*.py")
__all__ = [
    basename(path)[:-3]
    for path in modules
    if isfile(path) and not path.endswith("__init__.py")
]

View File

@@ -0,0 +1,469 @@
import subprocess, plistlib, sys, os, time, json
sys.path.append(os.path.abspath(os.path.dirname(os.path.realpath(__file__))))
import run
if sys.version_info < (3,0):
# Force use of StringIO instead of cStringIO as the latter
# has issues with Unicode strings
from StringIO import StringIO
class Disk:
    """Wrapper around macOS's diskutil for querying disks, partitions,
    APFS containers, and CoreStorage volumes, and for (un)mounting them.

    All lookups accept a mount point, volume name, UUID, or disk
    identifier and are resolved through get_identifier().
    """
    def __init__(self):
        self.r = run.Run()
        self.diskutil = self.get_diskutil()
        # Query sw_vers once and derive both the full and major.minor versions
        # (the original issued the same subprocess call twice)
        self.full_os_version = self.r.run({"args":["sw_vers", "-productVersion"]})[0]
        self.os_version = ".".join(self.full_os_version.split(".")[:2])
        if len(self.full_os_version.split(".")) < 3:
            # Add .0 in case of 10.14
            self.full_os_version += ".0"
        # Mounting EFI partitions requires sudo on macOS newer than this version
        self.sudo_mount_version = "10.13.6"
        self.sudo_mount_types = ["efi"]
        self.apfs = {}
        self._update_disks()
    def _get_str(self, val):
        # Helper method to return a native str value based on input type.
        # py2 unicode is encoded; py3 bytes are DECODED (calling .encode()
        # on bytes - as the original did - raises AttributeError).
        if sys.version_info >= (3,0):
            if isinstance(val, bytes):
                return val.decode("utf-8")
        elif isinstance(val, unicode):
            return val.encode("utf-8")
        return str(val)
    def _get_plist(self, s):
        """Parse a plist string into a dict - {} on any parse failure."""
        p = {}
        try:
            if sys.version_info >= (3, 0):
                p = plistlib.loads(s.encode("utf-8"))
            else:
                # p = plistlib.readPlistFromString(s)
                # We avoid using readPlistFromString() as that uses
                # cStringIO and fails when Unicode strings are detected
                # Don't subclass - keep the parser local
                from xml.parsers.expat import ParserCreate
                # Create a new PlistParser object - then we need to set up
                # the values and parse.
                pa = plistlib.PlistParser()
                # We also monkey patch this to encode unicode as utf-8
                def end_string():
                    d = pa.getData()
                    if isinstance(d,unicode):
                        d = d.encode("utf-8")
                    pa.addObject(d)
                pa.end_string = end_string
                parser = ParserCreate()
                parser.StartElementHandler = pa.handleBeginElement
                parser.EndElementHandler = pa.handleEndElement
                parser.CharacterDataHandler = pa.handleData
                if isinstance(s, unicode):
                    # Encode unicode -> string; use utf-8 for safety
                    s = s.encode("utf-8")
                # Parse the string
                parser.Parse(s, 1)
                p = pa.root
        except Exception as e:
            print(e)
            pass
        return p
    def _version_component(self, part, pad):
        # Parse one dotted-version field to an int.  Plain (including
        # negative) integers parse directly, which preserves the pad
        # sentinel; otherwise non-digit characters are stripped and an
        # empty result falls back to the pad value.
        try:
            return int(part)
        except ValueError:
            digits = ''.join(c for c in part if c.isdigit())
            return int(digits) if digits else pad
    def _compare_versions(self, vers1, vers2, pad = -1):
        """Compare ##.## version strings.

        vers1 < vers2 = True
        vers1 = vers2 = None
        vers1 > vers2 = False

        Shorter versions are padded with *pad* (default -1, so
        "10.13" < "10.13.0").  The original stripped the minus sign from
        the pad, silently turning it into +1.
        """
        # Sanitize the pad
        pad = -1 if not type(pad) is int else pad
        # Cast as strings and split to lists
        v1_parts = str(vers1).split(".")
        v2_parts = str(vers2).split(".")
        # Equalize lengths
        while len(v1_parts) < len(v2_parts):
            v1_parts.append(str(pad))
        while len(v2_parts) < len(v1_parts):
            v2_parts.append(str(pad))
        # Iterate and compare component by component
        for p1, p2 in zip(v1_parts, v2_parts):
            v1 = self._version_component(p1, pad)
            v2 = self._version_component(p2, pad)
            if v1 < v2:
                return True
            elif v1 > v2:
                return False
        # Never differed - return None, must be equal
        return None
    def update(self):
        """Public refresh of the cached disk info."""
        self._update_disks()
    def _update_disks(self):
        # Re-query diskutil and cache the plist, plain-text, and apfs views
        self.disks = self.get_disks()
        self.disk_text = self.get_disk_text()
        if self._compare_versions("10.12", self.os_version):
            self.apfs = self.get_apfs()
        else:
            self.apfs = {}
    def get_diskutil(self):
        # Returns the path to the diskutil binary
        return self.r.run({"args":["which", "diskutil"]})[0].split("\n")[0].split("\r")[0]
    def get_disks(self):
        # Returns a dictionary object of connected disks
        disk_list = self.r.run({"args":[self.diskutil, "list", "-plist"]})[0]
        return self._get_plist(disk_list)
    def get_disk_text(self):
        # Returns plain text listing connected disks
        return self.r.run({"args":[self.diskutil, "list"]})[0]
    def get_disk_info(self, disk):
        """Return diskutil info -plist for *disk* as a dict (None if unknown)."""
        disk_id = self.get_identifier(disk)
        if not disk_id:
            return None
        disk_list = self.r.run({"args":[self.diskutil, "info", "-plist", disk_id]})[0]
        return self._get_plist(disk_list)
    def get_disk_fs(self, disk):
        """Return the FilesystemName for *disk* (None if unknown)."""
        disk_id = self.get_identifier(disk)
        if not disk_id:
            return None
        return self.get_disk_info(disk_id).get("FilesystemName", None)
    def get_disk_fs_type(self, disk):
        """Return the FilesystemType for *disk* (None if unknown)."""
        disk_id = self.get_identifier(disk)
        if not disk_id:
            return None
        return self.get_disk_info(disk_id).get("FilesystemType", None)
    def get_apfs(self):
        # Returns a dictionary object of apfs disks
        output = self.r.run({"args":"echo y | " + self.diskutil + " apfs list -plist", "shell" : True})
        if not output[2] == 0:
            # Error getting apfs info - return an empty dict
            return {}
        disk_list = output[0]
        p_list = disk_list.split("<?xml")
        if len(p_list) > 1:
            # We had text before the start - get only the plist info
            disk_list = "<?xml" + p_list[-1]
        return self._get_plist(disk_list)
    def is_apfs(self, disk):
        """Return True/False whether *disk* is apfs, None if unresolvable."""
        disk_id = self.get_identifier(disk)
        if not disk_id:
            return None
        # Takes a disk identifier, and returns whether or not it's apfs
        for d in self.disks.get("AllDisksAndPartitions", []):
            if not "APFSVolumes" in d:
                continue
            if d.get("DeviceIdentifier", "").lower() == disk_id.lower():
                return True
            for a in d.get("APFSVolumes", []):
                if a.get("DeviceIdentifier", "").lower() == disk_id.lower():
                    return True
        return False
    def is_apfs_container(self, disk):
        disk_id = self.get_identifier(disk)
        if not disk_id:
            return None
        # Takes a disk identifier, and returns whether or not that specific
        # disk/volume is an APFS Container
        for d in self.disks.get("AllDisksAndPartitions", []):
            # Only check partitions
            for p in d.get("Partitions", []):
                if disk_id.lower() == p.get("DeviceIdentifier", "").lower():
                    return p.get("Content", "").lower() == "apple_apfs"
        return False
    def is_cs_container(self, disk):
        disk_id = self.get_identifier(disk)
        if not disk_id:
            return None
        # Takes a disk identifier, and returns whether or not that specific
        # disk/volume is an CoreStorage Container
        for d in self.disks.get("AllDisksAndPartitions", []):
            # Only check partitions
            for p in d.get("Partitions", []):
                if disk_id.lower() == p.get("DeviceIdentifier", "").lower():
                    return p.get("Content", "").lower() == "apple_corestorage"
        return False
    def is_core_storage(self, disk):
        """Return True if *disk* is a CoreStorage logical volume."""
        disk_id = self.get_identifier(disk)
        if not disk_id:
            return None
        if self._get_physical_disk(disk_id, "Logical Volume on "):
            return True
        return False
    def get_identifier(self, disk):
        # Should be able to take a mount point, disk name, or disk identifier,
        # and return the disk's identifier
        # Iterate!!
        if not disk or not len(self._get_str(disk)):
            return None
        disk = self._get_str(disk).lower()
        if disk.startswith("/dev/r"):
            disk = disk[len("/dev/r"):]
        elif disk.startswith("/dev/"):
            disk = disk[len("/dev/"):]
        if disk in self.disks.get("AllDisks", []):
            return disk
        for d in self.disks.get("AllDisksAndPartitions", []):
            for a in d.get("APFSVolumes", []):
                if disk in [ self._get_str(a.get(x, "")).lower() for x in ["DeviceIdentifier", "VolumeName", "VolumeUUID", "DiskUUID", "MountPoint"] ]:
                    return a.get("DeviceIdentifier", None)
            for a in d.get("Partitions", []):
                if disk in [ self._get_str(a.get(x, "")).lower() for x in ["DeviceIdentifier", "VolumeName", "VolumeUUID", "DiskUUID", "MountPoint"] ]:
                    return a.get("DeviceIdentifier", None)
        # At this point, we didn't find it
        return None
    def get_top_identifier(self, disk):
        """Return the whole-disk identifier, e.g. disk0s1 -> disk0."""
        disk_id = self.get_identifier(disk)
        if not disk_id:
            return None
        # Temporarily mangle "disk" so splitting on "s" only splits the
        # slice suffix, not the "s" inside the word "disk" itself
        return disk_id.replace("disk", "didk").split("s")[0].replace("didk", "disk")
    def _get_physical_disk(self, disk, search_term):
        # Scrape diskutil's plain-text output for the backing store of an
        # APFS/CoreStorage volume (the plist output doesn't expose it).
        # Change disk0s1 to disk0
        our_disk = self.get_top_identifier(disk)
        our_term = "/dev/" + our_disk
        found_disk = False
        our_text = ""
        for line in self.disk_text.split("\n"):
            if line.lower().startswith(our_term):
                found_disk = True
                continue
            if not found_disk:
                continue
            if line.lower().startswith("/dev/disk"):
                # At the next disk - bail
                break
            if search_term.lower() in line.lower():
                our_text = line
                break
        if not len(our_text):
            # Nothing found
            return None
        our_stores = "".join(our_text.strip().split(search_term)[1:]).split(" ,")
        if not len(our_stores):
            return None
        for store in our_stores:
            efi = self.get_efi(store)
            if efi:
                return store
        return None
    def get_physical_store(self, disk):
        # Returns the physical store containing the EFI
        disk_id = self.get_identifier(disk)
        if not disk_id:
            return None
        if not self.is_apfs(disk_id):
            return None
        return self._get_physical_disk(disk_id, "Physical Store ")
    def get_core_storage_pv(self, disk):
        # Returns the core storage physical volume containing the EFI
        disk_id = self.get_identifier(disk)
        if not disk_id:
            return None
        if not self.is_core_storage(disk_id):
            return None
        return self._get_physical_disk(disk_id, "Logical Volume on ")
    def get_parent(self, disk):
        """Return the parent container/whole-disk identifier for *disk*."""
        # Disk can be a mount point, disk name, or disk identifier
        disk_id = self.get_identifier(disk)
        if self.is_apfs(disk_id):
            disk_id = self.get_physical_store(disk_id)
        elif self.is_core_storage(disk_id):
            disk_id = self.get_core_storage_pv(disk_id)
        if not disk_id:
            return None
        if self.is_apfs(disk_id):
            # We have apfs - let's get the container ref
            for a in self.apfs.get("Containers", []):
                # Check if it's the whole container
                if a.get("ContainerReference", "").lower() == disk_id.lower():
                    return a["ContainerReference"]
                # Check through each volume and return the parent's container ref
                for v in a.get("Volumes", []):
                    if v.get("DeviceIdentifier", "").lower() == disk_id.lower():
                        return a.get("ContainerReference", None)
        else:
            # Not apfs - go through all volumes and whole disks
            for d in self.disks.get("AllDisksAndPartitions", []):
                if d.get("DeviceIdentifier", "").lower() == disk_id.lower():
                    return d["DeviceIdentifier"]
                for p in d.get("Partitions", []):
                    if p.get("DeviceIdentifier", "").lower() == disk_id.lower():
                        return d["DeviceIdentifier"]
        # Didn't find anything
        return None
    def get_efi(self, disk):
        """Return the identifier of the EFI partition on *disk*'s parent."""
        disk_id = self.get_parent(self.get_identifier(disk))
        if not disk_id:
            return None
        # At this point - we should have the parent
        for d in self.disks["AllDisksAndPartitions"]:
            if d.get("DeviceIdentifier", "").lower() == disk_id.lower():
                # Found our disk
                for p in d.get("Partitions", []):
                    if p.get("Content", "").lower() == "efi":
                        return p.get("DeviceIdentifier", None)
        return None
    def mount_partition(self, disk):
        """Mount *disk* (with sudo when the OS/partition type requires it)."""
        disk_id = self.get_identifier(disk)
        if not disk_id:
            return None
        sudo = False
        # Newer than sudo_mount_version + an EFI-type partition -> needs sudo
        if not self._compare_versions(self.full_os_version, self.sudo_mount_version) and self.get_content(disk_id).lower() in self.sudo_mount_types:
            sudo = True
        out = self.r.run({"args":[self.diskutil, "mount", disk_id], "sudo":sudo})
        self._update_disks()
        return out
    def unmount_partition(self, disk):
        """Unmount *disk* and refresh cached disk info."""
        disk_id = self.get_identifier(disk)
        if not disk_id:
            return None
        out = self.r.run({"args":[self.diskutil, "unmount", disk_id]})
        self._update_disks()
        return out
    def is_mounted(self, disk):
        """Return truthy if *disk* currently has a mount point."""
        disk_id = self.get_identifier(disk)
        if not disk_id:
            return None
        m = self.get_mount_point(disk_id)
        return (m != None and len(m))
    def get_volumes(self):
        # Returns a list object with all volumes from disks
        return self.disks.get("VolumesFromDisks", [])
    def _get_value_apfs(self, disk, field, default = None):
        return self._get_value(disk, field, default, True)
    def _get_value(self, disk, field, default = None, apfs_only = False):
        disk_id = self.get_identifier(disk)
        if not disk_id:
            return None
        # Takes a disk identifier, and returns the requested value
        for d in self.disks.get("AllDisksAndPartitions", []):
            for a in d.get("APFSVolumes", []):
                if a.get("DeviceIdentifier", "").lower() == disk_id.lower():
                    return a.get(field, default)
            if apfs_only:
                # Skip looking at regular partitions
                continue
            if d.get("DeviceIdentifier", "").lower() == disk_id.lower():
                return d.get(field, default)
            for a in d.get("Partitions", []):
                if a.get("DeviceIdentifier", "").lower() == disk_id.lower():
                    return a.get(field, default)
        return None
    # Getter methods
    def get_content(self, disk):
        return self._get_value(disk, "Content")
    def get_volume_name(self, disk):
        return self._get_value(disk, "VolumeName")
    def get_volume_uuid(self, disk):
        return self._get_value(disk, "VolumeUUID")
    def get_disk_uuid(self, disk):
        return self._get_value(disk, "DiskUUID")
    def get_mount_point(self, disk):
        return self._get_value(disk, "MountPoint")
    def open_mount_point(self, disk, new_window = False):
        """Open *disk*'s mount point in Finder; True on success."""
        disk_id = self.get_identifier(disk)
        if not disk_id:
            return None
        mount = self.get_mount_point(disk_id)
        if not mount:
            return None
        out = self.r.run({"args":["open", mount]})
        return out[2] == 0
    def get_mounted_volumes(self):
        # Returns a list of mounted volumes
        vol_list = self.r.run({"args":["ls", "-1", "/Volumes"]})[0].split("\n")
        vol_list = [ x for x in vol_list if x != "" ]
        return vol_list
    def get_mounted_volume_dicts(self):
        # Returns a list of dicts of name, identifier, mount point dicts
        vol_list = []
        for v in self.get_mounted_volumes():
            i = self.get_identifier(os.path.join("/Volumes", v))
            if i == None:
                # /Volumes entries that don't resolve may be the boot drive
                i = self.get_identifier("/")
                if not self.get_volume_name(i) == v:
                    # Not valid and not our boot drive
                    continue
            vol_list.append({
                "name" : self.get_volume_name(i),
                "identifier" : i,
                "mount_point" : self.get_mount_point(i),
                "disk_uuid" : self.get_disk_uuid(i),
                "volume_uuid" : self.get_volume_uuid(i)
            })
        return vol_list
    def get_disks_and_partitions_dict(self):
        # Returns a list of dictionaries like so:
        # { "disk0" : { "partitions" : [
        #     {
        #         "identifier" : "disk0s1",
        #         "name" : "EFI",
        #         "mount_point" : "/Volumes/EFI"
        #     }
        # ] } }
        disks = {}
        for d in self.disks.get("AllDisks", []):
            # Get the parent and make sure it has an entry
            parent = self.get_parent(d)
            top_disk = self.get_top_identifier(d)
            if top_disk == d and not self.is_core_storage(d):
                # Top level, skip
                continue
            # Not top level - make sure it's not an apfs container or core storage container
            if self.is_apfs_container(d):
                continue
            if self.is_cs_container(d):
                continue
            if not parent in disks:
                disks[parent] = { "partitions" : [] }
            disks[parent]["partitions"].append({
                "name" : self.get_volume_name(d),
                "identifier" : d,
                "mount_point" : self.get_mount_point(d),
                "disk_uuid" : self.get_disk_uuid(d),
                "volume_uuid" : self.get_volume_uuid(d)
            })
        return disks

View File

@@ -0,0 +1,181 @@
import subprocess, plistlib, sys, os, time, json, csv
sys.path.append(os.path.abspath(os.path.dirname(os.path.realpath(__file__))))
from Scripts import run
class Disk:
    """Windows disk enumerator.

    Gathers physical disks, partitions, and drive letters via WMIC when
    available, falling back to PowerShell's Get-WmiObject output.
    """
    def __init__(self):
        self.r = run.Run()
        self.wmic = self._get_wmic()
        # "where" can return stale/invalid paths - verify it exists on disk
        if self.wmic and not os.path.exists(self.wmic):
            self.wmic = None
        self.disks = {}
        self._update_disks()
    def _get_wmic(self):
        # Attempt to locate WMIC.exe; returns the first match or None
        wmic_list = self.r.run({"args":["where","wmic"]})[0].replace("\r","").split("\n")
        if wmic_list:
            return wmic_list[0]
        return None
    def update(self):
        """Public refresh of the cached disk info."""
        self._update_disks()
    def _update_disks(self):
        self.disks = self.get_disks()
    def _get_rows(self, row_list):
        """Parse PowerShell Format-List output ("Key : Value" lines,
        records separated by blank lines) into a list of value lists."""
        rows = []
        last_row = []
        for row in row_list:
            if not row.strip(): # Empty
                if last_row: # Got a row at least - append it and reset
                    rows.append(last_row)
                    last_row = []
                continue # Skip anything else
            # Not an empty row - let's try to get the info
            try: last_row.append(" : ".join(row.split(" : ")[1:]))
            except: pass
        if last_row:
            # Flush the final record - the original dropped it whenever the
            # output did not end with a trailing blank line
            rows.append(last_row)
        return rows
    def _get_diskdrive(self):
        """Return rows of DeviceID,Index,Model,Partitions,Size per disk."""
        disks = []
        if self.wmic: # Use WMIC where possible
            try:
                wmic = self.r.run({"args":[self.wmic, "DiskDrive", "get", "DeviceID,Index,Model,Partitions,Size", "/format:csv"]})[0]
                # Get the rows - but skip the first 2 (empty, headers) and the last 1 (empty again)
                disks = list(csv.reader(wmic.replace("\r","").split("\n"), delimiter=","))[2:-1]
                # We need to skip the Node value for each row as well
                disks = [x[1:] for x in disks]
            except:
                pass
        if not disks: # Use PowerShell and parse the info manually
            try:
                ps = self.r.run({"args":["powershell", "-c", "Get-WmiObject -Class Win32_DiskDrive | Format-List -Property DeviceID,Index,Model,Partitions,Size"]})[0]
                # We need to iterate the rows and add each column manually
                disks = self._get_rows(ps.replace("\r","").split("\n"))
            except:
                pass
        return disks
    def _get_ldtop(self):
        """Return Win32_LogicalDiskToPartition rows mapping partitions
        to drive letters."""
        disks = []
        if self.wmic: # Use WMIC where possible
            try:
                wmic = self.r.run({"args":[self.wmic, "path", "Win32_LogicalDiskToPartition", "get", "Antecedent,Dependent"]})[0]
                # Get the rows - but skip the first and last as they're empty
                disks = wmic.replace("\r","").split("\n")[1:-1]
            except:
                pass
        if not disks: # Use PowerShell and parse the info manually
            try:
                ps = self.r.run({"args":["powershell", "-c", "Get-WmiObject -Class Win32_LogicalDiskToPartition | Format-List -Property Antecedent,Dependent"]})[0]
                # We need to iterate the rows and add each column manually
                disks = self._get_rows(ps.replace("\r","").split("\n"))
                # We need to join the values with 2 spaces to match the WMIC output
                disks = ["  ".join(x) for x in disks]
            except:
                pass
        return disks
    def _get_logicaldisk(self):
        """Return rows of DeviceID,DriveType,FileSystem,Size,VolumeName
        per logical disk (drive letter)."""
        disks = []
        if self.wmic: # Use WMIC where possible
            try:
                wmic = self.r.run({"args":[self.wmic, "LogicalDisk", "get", "DeviceID,DriveType,FileSystem,Size,VolumeName", "/format:csv"]})[0]
                # Get the rows - but skip the first 2 (empty, headers) and the last 1 (empty again)
                disks = list(csv.reader(wmic.replace("\r","").split("\n"), delimiter=","))[2:-1]
                # We need to skip the Node value for each row as well
                disks = [x[1:] for x in disks]
            except:
                pass
        if not disks: # Use PowerShell and parse the info manually
            try:
                ps = self.r.run({"args":["powershell", "-c", "Get-WmiObject -Class Win32_LogicalDisk | Format-List -Property DeviceID,DriveType,FileSystem,Size,VolumeName"]})[0]
                # We need to iterate the rows and add each column manually
                disks = self._get_rows(ps.replace("\r","").split("\n"))
            except:
                pass
        return disks
    def get_disks(self):
        # We hate windows... all of us.
        #
        # This has to be done in 3 commands,
        # 1. To get the PHYSICALDISK entries, index, and model
        # 2. To get the drive letter, volume name, fs, and size
        # 3. To get some connection between them...
        #
        # May you all forgive me...
        disks = self._get_diskdrive()
        p_disks = {}
        for ds in disks:
            if len(ds) < 5:
                continue
            p_disks[ds[1]] = {
                "device":ds[0],
                "model":" ".join(ds[2:-2]),
                "type":0 # 0 = Unknown, 1 = No Root Dir, 2 = Removable, 3 = Local, 4 = Network, 5 = Disc, 6 = RAM disk
            }
            # More fault-tolerance with ints
            p_disks[ds[1]]["index"] = int(ds[1]) if len(ds[1]) else -1
            p_disks[ds[1]]["size"] = int(ds[-1]) if len(ds[-1]) else -1
            p_disks[ds[1]]["partitioncount"] = int(ds[-2]) if len(ds[-2]) else 0
        if not p_disks:
            # Drat, nothing
            return p_disks
        # Let's find a way to map this biz now
        ldtop = self._get_ldtop()
        for l in ldtop:
            l = l.lower()
            d = p = mp = None
            try:
                dp = l.split("deviceid=")[1].split('"')[1]
                mp = l.split("deviceid=")[-1].split('"')[1].upper()
                d = dp.split("disk #")[1].split(",")[0]
                p = dp.split("partition #")[1]
            except:
                pass
            if any([d, p, mp]):
                # Got *something*
                if p_disks.get(d,None):
                    if not p_disks[d].get("partitions",None):
                        p_disks[d]["partitions"] = {}
                    p_disks[d]["partitions"][p] = {"letter":mp}
        # Last attempt to do this - let's get the partition names!
        parts = self._get_logicaldisk()
        if not parts:
            return p_disks
        for ps in parts:
            if len(ps) < 2:
                # Need the drive letter and disk type at minimum
                continue
            # Organize!
            plt = ps[0] # get letter
            ptp = ps[1] # get disk type
            # Initialize
            pfs = pnm = None
            psz = -1 # Set to -1 initially for indeterminate size
            try:
                pfs = ps[2] # get file system
                psz = ps[3] # get size
                pnm = ps[4] # get the rest in the name
            except:
                pass
            for d in p_disks:
                p_dict = p_disks[d]
                for pr in p_dict.get("partitions",{}):
                    pr = p_dict["partitions"][pr]
                    if pr.get("letter","").upper() == plt.upper():
                        # Found it - set all attributes
                        pr["size"] = int(psz) if len(psz) else -1
                        pr["file system"] = pfs
                        pr["name"] = pnm
                        # Also need to set the parent drive's type
                        if len(ptp):
                            p_dict["type"] = int(ptp)
                        break
        return p_disks

View File

@@ -0,0 +1,354 @@
import sys, os, time, ssl, gzip, multiprocessing
from io import BytesIO
# Python-aware urllib stuff
try:
from urllib.request import urlopen, Request
import queue as q
except ImportError:
# Import urllib2 to catch errors
import urllib2
from urllib2 import urlopen, Request
import Queue as q
# Width used to size the progress bar; NOTE(review): hard-coded per-OS
# (120 on Windows, 80 elsewhere) rather than querying the real console width
TERMINAL_WIDTH = 120 if os.name=="nt" else 80
def get_size(size, suffix=None, use_1024=False, round_to=2, strip_zeroes=False):
    """Render a byte count as a human-readable string.

    size         -> number of bytes (-1 means unknown)
    suffix       -> force a specific unit (e.g. "MB") when it matches one
    use_1024     -> binary units (KiB/MiB/...) instead of decimal (KB/MB/...)
    round_to     -> decimal places to keep, clamped to 0-15 (default 2)
    strip_zeroes -> drop trailing zeroes instead of zero-padding
    """
    # Failsafe in case our size is unknown
    if size == -1:
        return "Unknown"
    units = ["B","KiB","MiB","GiB","TiB","PiB"] if use_1024 else ["B","KB","MB","GB","TB","PB"]
    step = 1024 if use_1024 else 1000
    # Pre-compute the value expressed in every unit: {unit: value}
    scaled = {}
    value = float(size)
    for unit in units:
        scaled[unit] = value
        value /= step
    # Resolve a requested suffix to its canonical casing (None if unknown)
    if suffix:
        suffix = next((u for u in units if u.lower() == suffix.lower()), None)
    # Use the forced unit when valid, else the largest unit still >= 1
    chosen = suffix or next((u for u in reversed(units) if scaled[u] >= 1), "B")
    # Sanitize the rounding precision - default to 2 on any bad input
    try:
        round_to = int(round_to)
    except:
        round_to = 2
    round_to = min(max(round_to, 0), 15)
    rounded = round(scaled[chosen], round_to)
    # Split into whole/fractional parts and strip or pad the fraction
    whole, frac = str(rounded).split(".")
    if strip_zeroes:
        frac = frac.rstrip("0")
    elif round_to > 0:
        frac = frac.ljust(round_to, "0")
    else:
        frac = ""
    return "{:,}{} {}".format(int(whole), "." + frac if frac else "", chosen)
def _process_hook(queue, total_size, bytes_so_far=0, update_interval=1.0, max_packets=0):
    """Child-process loop that renders download progress on stdout.

    Consumes (timestamp, byte_count) tuples from *queue*, accumulating
    *bytes_so_far*, and redraws the status line each pass.  A literal
    "DONE" on the queue (or Ctrl-C) ends the loop.  total_size of -1
    means the size is unknown (no percentage/ETA is shown).  When
    max_packets > 0, only that many recent packets feed the speed math.
    """
    packets = []
    speed = remaining = ""
    last_update = time.time()
    while True:
        # Write our info first so we have *some* status while
        # waiting for packets
        if total_size > 0:
            percent = float(bytes_so_far) / total_size
            percent = round(percent*100, 2)
            t_s = get_size(total_size)
            try:
                # Express bytes-so-far in the same unit as the total
                b_s = get_size(bytes_so_far, t_s.split(" ")[1])
            except:
                b_s = get_size(bytes_so_far)
            perc_str = " {:.2f}%".format(percent)
            bar_width = (TERMINAL_WIDTH // 3)-len(perc_str)
            progress = "=" * int(bar_width * (percent/100))
            # \r\033[K returns to column 0 and clears the line before redrawing
            sys.stdout.write("\r\033[K{}/{} | {}{}{}{}{}".format(
                b_s,
                t_s,
                progress,
                " " * (bar_width-len(progress)),
                perc_str,
                speed,
                remaining
            ))
        else:
            b_s = get_size(bytes_so_far)
            sys.stdout.write("\r\033[K{}{}".format(b_s, speed))
        sys.stdout.flush()
        # Now we gather the next packet
        try:
            packet = queue.get(timeout=update_interval)
            # Packets should be formatted as a tuple of
            # (timestamp, len(bytes_downloaded))
            # If "DONE" is passed, we assume the download
            # finished - and bail
            if packet == "DONE":
                print("") # Jump to the next line
                return
            # Append our packet to the list and ensure we're not
            # beyond our max.
            # Only check max if it's > 0
            packets.append(packet)
            if max_packets > 0:
                packets = packets[-max_packets:]
            # Increment our bytes so far as well
            bytes_so_far += packet[1]
        except q.Empty:
            # Didn't get anything - reset the speed
            # and packets
            packets = []
            speed = " | 0 B/s"
            remaining = " | ?? left" if total_size > 0 else ""
        except KeyboardInterrupt:
            print("") # Jump to the next line
            return
        # If we have packets and it's time for an update, process
        # the info.
        update_check = time.time()
        if packets and update_check - last_update >= update_interval:
            last_update = update_check # Refresh our update timestamp
            speed = " | ?? B/s"
            if len(packets) > 1:
                # Let's calculate the amount downloaded over how long
                try:
                    first,last = packets[0][0],packets[-1][0]
                    chunks = sum([float(x[1]) for x in packets])
                    t = last-first
                    assert t >= 0
                    bytes_speed = 1. / t * chunks
                    speed = " | {}/s".format(get_size(bytes_speed,round_to=1))
                    # Get our remaining time
                    if total_size > 0:
                        seconds_left = (total_size-bytes_so_far) / bytes_speed
                        days = seconds_left // 86400
                        hours = (seconds_left - (days*86400)) // 3600
                        mins = (seconds_left - (days*86400) - (hours*3600)) // 60
                        secs = seconds_left - (days*86400) - (hours*3600) - (mins*60)
                        if days > 99 or bytes_speed == 0:
                            remaining = " | ?? left"
                        else:
                            remaining = " | {}{:02d}:{:02d}:{:02d} left".format(
                                "{}:".format(int(days)) if days else "",
                                int(hours),
                                int(mins),
                                int(round(secs))
                            )
                except:
                    pass
            # Clear the packets so we don't reuse the same ones
            packets = []
class Downloader:
    """Simple HTTP(S) downloader with an optional progress display run in
    a separate process, adaptive chunk sizing, gzip expansion, and
    resumable streaming to disk."""
    def __init__(self,**kwargs):
        self.ua = kwargs.get("useragent",{"User-Agent":"Mozilla"})
        self.chunk = None # Auto-assign if None, otherwise explicit
        self.min_chunk = 1024 # 1 KiB min chunk size
        self.max_chunk = 1024 * 1024 * 4 # 4 MiB max chunk size
        self.chunk_rate = 0.1 # Update every 0.1 seconds
        self.chunk_growth = 1.5 # Max multiplier for chunk growth
        if os.name=="nt": os.system("color") # Initialize cmd for ANSI escapes
        # Provide reasonable default logic to workaround macOS CA file handling
        cafile = ssl.get_default_verify_paths().openssl_cafile
        try:
            # If default OpenSSL CA file does not exist, use that from certifi
            if not os.path.exists(cafile):
                import certifi
                cafile = certifi.where()
            self.ssl_context = ssl.create_default_context(cafile=cafile)
        except:
            # None of the above worked, disable certificate verification for now
            self.ssl_context = ssl._create_unverified_context()
        return
    def _decode(self, value, encoding="utf-8", errors="ignore"):
        # Helper method to only decode if bytes type
        if sys.version_info >= (3,0) and isinstance(value, bytes):
            return value.decode(encoding,errors)
        return value
    def _update_main_name(self):
        # Windows running python 2 seems to have issues with multiprocessing
        # if the case of the main script's name is incorrect:
        # e.g. Downloader.py vs downloader.py
        #
        # To work around this, we try to scrape for the correct case if
        # possible.
        try:
            path = os.path.abspath(sys.modules["__main__"].__file__)
        except AttributeError as e:
            # This likely means we're running from the interpreter
            # directly
            return None
        if not os.path.isfile(path):
            return None
        # Get the file name and folder path
        name = os.path.basename(path).lower()
        fldr = os.path.dirname(path)
        # Walk the files in the folder until we find our
        # name - then steal its case and update that path
        for f in os.listdir(fldr):
            if f.lower() == name:
                # Got it
                new_path = os.path.join(fldr,f)
                sys.modules["__main__"].__file__ = new_path
                return new_path
        # If we got here, it wasn't found
        return None
    def _get_headers(self, headers = None):
        # Fall back on the default ua if none provided
        target = headers if isinstance(headers,dict) else self.ua
        new_headers = {}
        # Shallow copy to prevent changes to the headers
        # overriding the original
        for k in target:
            new_headers[k] = target[k]
        return new_headers
    def _start_progress(self, total_size, bytes_so_far=0):
        # Spawn the progress-reporting child process; returns (queue, process).
        # Shared by get_bytes() and stream_to_file().
        queue = multiprocessing.Queue()
        process = multiprocessing.Process(
            target=_process_hook,
            args=(queue,total_size,bytes_so_far)
        )
        process.daemon = True
        # Filthy hack for earlier python versions on Windows
        if os.name == "nt" and hasattr(multiprocessing,"forking"):
            self._update_main_name()
        process.start()
        return queue, process
    def _next_chunk_size(self, chunk_size, chunk_len, chunk_time):
        # Adjust the read size toward the observed transfer rate at our
        # defined update rate, clamped by min_chunk, chunk_growth, and
        # max_chunk.
        chunk_rate = int(chunk_len / chunk_time * self.chunk_rate)
        chunk_change_max = round(chunk_size * self.chunk_growth)
        chunk_rate_clamped = min(max(self.min_chunk, chunk_rate), chunk_change_max)
        return min(chunk_rate_clamped, self.max_chunk)
    def open_url(self, url, headers = None):
        """Open *url* and return the response, or None on any error."""
        headers = self._get_headers(headers)
        # Wrap up the try/except block so we don't have to do this for each function
        try:
            response = urlopen(Request(url, headers=headers), context=self.ssl_context)
        except Exception as e:
            # No fixing this - bail
            return None
        return response
    def get_size(self, *args, **kwargs):
        """Convenience passthrough to the module-level get_size()."""
        return get_size(*args,**kwargs)
    def get_string(self, url, progress = True, headers = None, expand_gzip = True):
        """Download *url* and return its body decoded to str (None on error)."""
        response = self.get_bytes(url,progress,headers,expand_gzip)
        if response is None: return None
        return self._decode(response)
    def get_bytes(self, url, progress = True, headers = None, expand_gzip = True):
        """Download *url* into memory and return the bytes (None on error)."""
        response = self.open_url(url, headers)
        if response is None: return None
        try: total_size = int(response.headers['Content-Length'])
        except: total_size = -1
        chunk_so_far = b""
        queue = process = None
        if progress:
            queue,process = self._start_progress(total_size)
        try:
            chunk_size = self.chunk or 1024
            auto_chunk_size = not self.chunk
            while True:
                t = time.perf_counter()
                chunk = response.read(chunk_size)
                chunk_time = time.perf_counter()-t
                if progress:
                    # Add our items to the queue
                    queue.put((time.time(),len(chunk)))
                if not chunk: break
                chunk_so_far += chunk
                if auto_chunk_size:
                    chunk_size = self._next_chunk_size(chunk_size,len(chunk),chunk_time)
        finally:
            # Close the response whenever we're done
            response.close()
        if progress:
            # Finalize the queue and wait BEFORE any post-processing - the
            # original returned gzip-expanded data without ever sending
            # "DONE", leaving the progress child running
            queue.put("DONE")
            process.join()
        if expand_gzip and response.headers.get("Content-Encoding","unknown").lower() == "gzip":
            fileobj = BytesIO(chunk_so_far)
            gfile = gzip.GzipFile(fileobj=fileobj)
            return gfile.read()
        return chunk_so_far
    def stream_to_file(self, url, file_path, progress = True, headers = None, ensure_size_if_present = True, allow_resume = False):
        """Download *url* to *file_path*, optionally resuming a partial
        file via a Range request.  Returns file_path on success, None on
        failure (including a size mismatch when verification is on)."""
        response = self.open_url(url, headers)
        if response is None: return None
        bytes_so_far = 0
        try: total_size = int(response.headers['Content-Length'])
        except: total_size = -1
        queue = process = None
        mode = "wb"
        if allow_resume and os.path.isfile(file_path) and total_size != -1:
            # File exists, we're resuming and have a target size. Check the
            # local file size.
            current_size = os.stat(file_path).st_size
            if current_size == total_size:
                # File is already complete - return the path
                return file_path
            elif current_size < total_size:
                response.close()
                # File is not complete - seek to our current size
                bytes_so_far = current_size
                mode = "ab" # Append
                # We also need to try creating a new request
                # in order to pass our range header
                new_headers = self._get_headers(headers)
                # Get the start byte, 0-indexed
                byte_string = "bytes={}-".format(current_size)
                new_headers["Range"] = byte_string
                response = self.open_url(url, new_headers)
                if response is None: return None
        if progress:
            queue,process = self._start_progress(total_size,bytes_so_far)
        with open(file_path,mode) as f:
            chunk_size = self.chunk or 1024
            auto_chunk_size = not self.chunk
            try:
                while True:
                    t = time.perf_counter()
                    chunk = response.read(chunk_size)
                    chunk_time = time.perf_counter()-t
                    bytes_so_far += len(chunk)
                    if progress:
                        # Add our items to the queue
                        queue.put((time.time(),len(chunk)))
                    if not chunk: break
                    f.write(chunk)
                    if auto_chunk_size:
                        chunk_size = self._next_chunk_size(chunk_size,len(chunk),chunk_time)
            finally:
                # Close the response whenever we're done
                response.close()
        if progress:
            # Finalize the queue and wait
            queue.put("DONE")
            process.join()
        if ensure_size_if_present and total_size != -1:
            # We're verifying size - make sure we got what we asked for
            if bytes_so_far != total_size:
                return None # We didn't - imply it failed
        return file_path if os.path.exists(file_path) else None

View File

@@ -0,0 +1,688 @@
### ###
# Imports #
### ###
import datetime, os, plistlib, struct, sys, itertools, binascii
from io import BytesIO
if sys.version_info < (3,0):
# Force use of StringIO instead of cStringIO as the latter
# has issues with Unicode strings
from StringIO import StringIO
else:
from io import StringIO
# py2/py3 compatibility shims - define basestring/unicode on py3 so the
# rest of the module can use a single isinstance() spelling everywhere
try:
    basestring  # Python 2
    unicode
except NameError:
    basestring = str  # Python 3
    unicode = str
# plistlib only grew FMT_XML/FMT_BINARY on py3 - use string sentinels on py2
try:
    FMT_XML = plistlib.FMT_XML
    FMT_BINARY = plistlib.FMT_BINARY
except AttributeError:
    FMT_XML = "FMT_XML"
    FMT_BINARY = "FMT_BINARY"
### ###
# Helper Methods #
### ###
def wrap_data(value):
    # Py2's plistlib expects raw bytes wrapped in a Data object;
    # py3 handles bytes natively, so pass them straight through.
    return value if _check_py3() else plistlib.Data(value)
def extract_data(value):
    # Unwrap a py2 plistlib.Data container back to its raw bytes; every
    # other value (including all py3 values) is returned untouched.
    if _check_py3():
        return value
    return value.data if isinstance(value, plistlib.Data) else value
def _check_py3():
return sys.version_info >= (3, 0)
def _is_binary(fp):
    """Return True when fp holds a binary plist.

    fp may be a raw str/bytes blob or a readable, seekable file object.
    Binary plists always begin with the 8-byte magic 'bplist00'.
    """
    if isinstance(fp, basestring) or isinstance(fp, bytes):
        # Compare the prefix in a type-agnostic way: the previous
        # fp.startswith(b"bplist00") raised TypeError for py3 str input,
        # and py3 bytes input fell through to the file-object branch.
        return fp[:8] in (b"bplist00", u"bplist00")
    # File object: peek at the header, then rewind for the real parser.
    header = fp.read(32)
    fp.seek(0)
    return header[:8] == b'bplist00'
def _seek_past_whitespace(fp):
offset = 0
while True:
byte = fp.read(1)
if not byte:
# End of file, reset offset and bail
offset = 0
break
if not byte.isspace():
# Found our first non-whitespace character
break
offset += 1
# Seek to the first non-whitespace char
fp.seek(offset)
return offset
### ###
# Deprecated Functions - Remapped #
### ###
def readPlist(pathOrFile):
    """Deprecated shim - accepts either a filesystem path or an open file."""
    if isinstance(pathOrFile, basestring):
        with open(pathOrFile, "rb") as f:
            return load(f)
    return load(pathOrFile)
def writePlist(value, pathOrFile):
    """Deprecated shim - always emits sorted XML, like the py2 original."""
    if isinstance(pathOrFile, basestring):
        with open(pathOrFile, "wb") as f:
            return dump(value, f, fmt=FMT_XML, sort_keys=True, skipkeys=False)
    return dump(value, pathOrFile, fmt=FMT_XML, sort_keys=True, skipkeys=False)
### ###
# Remapped Functions #
### ###
def load(fp, fmt=None, use_builtin_types=None, dict_type=dict):
    """Parse a plist from the file object fp and return the root object.

    Three paths:
      1. Binary plists go through the local _BinaryPlistParser (works on py2).
      2. On py3, XML/other formats use plistlib's own parsers, monkey patched
         to accept 0x-prefixed hex integers and to report line numbers on
         bad <data> payloads.
      3. On py2, a locally patched plistlib.PlistParser handles custom
         dict_type, hex ints, and unicode string encoding.
    """
    if _is_binary(fp):
        # Binary plists default to py2-style wrapped Data objects
        use_builtin_types = False if use_builtin_types is None else use_builtin_types
        try:
            p = _BinaryPlistParser(use_builtin_types=use_builtin_types, dict_type=dict_type)
        except:
            # Python 3.9 removed use_builtin_types
            p = _BinaryPlistParser(dict_type=dict_type)
        return p.parse(fp)
    elif _check_py3():
        offset = _seek_past_whitespace(fp)
        use_builtin_types = True if use_builtin_types is None else use_builtin_types
        # We need to monkey patch this to allow for hex integers - code taken/modified from
        # https://github.com/python/cpython/blob/3.8/Lib/plistlib.py
        if fmt is None:
            # Sniff the format from the header, then rewind past whitespace
            header = fp.read(32)
            fp.seek(offset)
            for info in plistlib._FORMATS.values():
                if info['detect'](header):
                    P = info['parser']
                    break
            else:
                raise plistlib.InvalidFileException()
        else:
            P = plistlib._FORMATS[fmt]['parser']
        try:
            p = P(use_builtin_types=use_builtin_types, dict_type=dict_type)
        except:
            # Python 3.9 removed use_builtin_types
            p = P(dict_type=dict_type)
        if isinstance(p,plistlib._PlistParser):
            # Monkey patch!
            def end_integer():
                d = p.get_data()
                # Accept 0x-prefixed hex integers in addition to decimal
                value = int(d,16) if d.lower().startswith("0x") else int(d)
                if -1 << 63 <= value < 1 << 64:
                    p.add_object(value)
                else:
                    raise OverflowError("Integer overflow at line {}".format(p.parser.CurrentLineNumber))
            def end_data():
                try:
                    p.add_object(plistlib._decode_base64(p.get_data()))
                except Exception as e:
                    raise Exception("Data error at line {}: {}".format(p.parser.CurrentLineNumber,e))
            p.end_integer = end_integer
            p.end_data = end_data
        return p.parse(fp)
    else:
        offset = _seek_past_whitespace(fp)
        # Is not binary - assume a string - and try to load
        # We avoid using readPlistFromString() as that uses
        # cStringIO and fails when Unicode strings are detected
        # Don't subclass - keep the parser local
        from xml.parsers.expat import ParserCreate
        # Create a new PlistParser object - then we need to set up
        # the values and parse.
        p = plistlib.PlistParser()
        parser = ParserCreate()
        parser.StartElementHandler = p.handleBeginElement
        parser.EndElementHandler = p.handleEndElement
        parser.CharacterDataHandler = p.handleData
        # We also need to monkey patch this to allow for other dict_types, hex int support
        # proper line output for data errors, and for unicode string decoding
        def begin_dict(attrs):
            d = dict_type()
            p.addObject(d)
            p.stack.append(d)
        def end_integer():
            d = p.getData()
            value = int(d,16) if d.lower().startswith("0x") else int(d)
            if -1 << 63 <= value < 1 << 64:
                p.addObject(value)
            else:
                raise OverflowError("Integer overflow at line {}".format(parser.CurrentLineNumber))
        def end_data():
            try:
                p.addObject(plistlib.Data.fromBase64(p.getData()))
            except Exception as e:
                raise Exception("Data error at line {}: {}".format(parser.CurrentLineNumber,e))
        def end_string():
            d = p.getData()
            if isinstance(d,unicode):
                d = d.encode("utf-8")
            p.addObject(d)
        p.begin_dict = begin_dict
        p.end_integer = end_integer
        p.end_data = end_data
        p.end_string = end_string
        if isinstance(fp, unicode):
            # Encode unicode -> string; use utf-8 for safety
            fp = fp.encode("utf-8")
        if isinstance(fp, basestring):
            # It's a string - let's wrap it up
            fp = StringIO(fp)
        # Parse it
        parser.ParseFile(fp)
        return p.root
def loads(value, fmt=None, use_builtin_types=None, dict_type=dict):
    """Parse a plist from a string/bytes blob (remap of plistlib.loads)."""
    if _check_py3() and isinstance(value, basestring):
        # py3 str -> bytes so it can ride in a BytesIO below
        value = value.encode()
    kwargs = {"fmt": fmt, "use_builtin_types": use_builtin_types, "dict_type": dict_type}
    try:
        return load(BytesIO(value), **kwargs)
    except:
        # Python 3.9 removed use_builtin_types - retry without it
        # (fresh BytesIO: the first attempt may have consumed the stream)
        kwargs.pop("use_builtin_types", None)
        return load(BytesIO(value), **kwargs)
def dump(value, fp, fmt=FMT_XML, sort_keys=True, skipkeys=False):
    """Serialize value to the file object fp as an XML or binary plist.

    Binary output always goes through the local _BinaryPlistWriter so py2
    works; XML uses plistlib natively on py3, and on py2 a monkey patched
    PlistWriter that honors sort_keys/skipkeys.  Raises ValueError for an
    unknown fmt, TypeError for non-string dict keys (unless skipkeys).
    """
    if fmt == FMT_BINARY:
        # Assume binary at this point
        writer = _BinaryPlistWriter(fp, sort_keys=sort_keys, skipkeys=skipkeys)
        writer.write(value)
    elif fmt == FMT_XML:
        if _check_py3():
            plistlib.dump(value, fp, fmt=fmt, sort_keys=sort_keys, skipkeys=skipkeys)
        else:
            # We need to monkey patch a bunch here too in order to avoid auto-sorting
            # of keys
            writer = plistlib.PlistWriter(fp)
            def writeDict(d):
                if d:
                    writer.beginElement("dict")
                    # Only sort when asked to - stock py2 always sorted
                    items = sorted(d.items()) if sort_keys else d.items()
                    for key, value in items:
                        if not isinstance(key, basestring):
                            if skipkeys:
                                continue
                            raise TypeError("keys must be strings")
                        writer.simpleElement("key", key)
                        writer.writeValue(value)
                    writer.endElement("dict")
                else:
                    writer.simpleElement("dict")
            writer.writeDict = writeDict
            writer.writeln("<plist version=\"1.0\">")
            writer.writeValue(value)
            writer.writeln("</plist>")
    else:
        # Not a proper format
        raise ValueError("Unsupported format: {}".format(fmt))
def dumps(value, fmt=FMT_XML, skipkeys=False, sort_keys=True):
    """Serialize value to a plist string (remap of plistlib.dumps)."""
    # BytesIO under py3, StringIO under py2 - we avoid writePlistToString()
    # since its cStringIO chokes on Unicode strings
    buf = BytesIO() if _check_py3() else StringIO()
    dump(value, buf, fmt=fmt, skipkeys=skipkeys, sort_keys=sort_keys)
    result = buf.getvalue()
    # py3 callers expect text back
    return result.decode("utf-8") if _check_py3() else result
### ###
# Binary Plist Stuff For Py2 #
### ###
# From the python 3 plistlib.py source: https://github.com/python/cpython/blob/3.11/Lib/plistlib.py
# Tweaked to function on both Python 2 and 3
class UID:
    """Represents a CF$UID value as found in keyed-archiver plists.

    Apple's CoreFoundation only ever reads/writes 32-bit unsigned UIDs
    (CFBinaryPList.c uses 32-bit unsigned ints throughout, and so does the
    CF$UID handling in CFPropertyList.c), so values outside [0, 2**32)
    are rejected up front even though the format could express 64 bits.
    """
    def __init__(self, data):
        if not isinstance(data, int):
            raise TypeError("data must be an int")
        if data >= 1 << 32:
            raise ValueError("UIDs cannot be >= 2**32 (4294967296)")
        if data < 0:
            raise ValueError("UIDs must be positive")
        self.data = data
    def __index__(self):
        # Lets struct.pack and friends treat a UID as its integer value
        return self.data
    def __repr__(self):
        return "{}({})".format(type(self).__name__, repr(self.data))
    def __reduce__(self):
        # Pickle support - rebuild from the wrapped int
        return self.__class__, (self.data,)
    def __eq__(self, other):
        if not isinstance(other, UID):
            return NotImplemented
        return self.data == other.data
    def __hash__(self):
        return hash(self.data)
class InvalidFileException(ValueError):
    """Raised when a plist cannot be parsed as a valid file."""
    def __init__(self, message="Invalid file"):
        super(InvalidFileException, self).__init__(message)
# struct format characters for big-endian unsigned ints of 1/2/4/8 bytes
_BINARY_FORMAT = {1: 'B', 2: 'H', 4: 'L', 8: 'Q'}
# Sentinel marking an object slot that has not been parsed yet
_undefined = object()
class _BinaryPlistParser:
    """
    Read or write a binary plist file, following the description of the binary
    format. Raise InvalidFileException in case of error, otherwise return the
    root object.
    see also: http://opensource.apple.com/source/CF/CF-744.18/CFBinaryPList.c
    """
    def __init__(self, use_builtin_types, dict_type):
        # use_builtin_types: when False, <data> payloads come back wrapped
        # in plistlib.Data (py2 style) instead of raw bytes
        self._use_builtin_types = use_builtin_types
        # Mapping class instantiated for every parsed dict
        self._dict_type = dict_type
    def parse(self, fp):
        """Parse the binary plist in file object fp; return the root object."""
        try:
            # The basic file format:
            # HEADER
            # object...
            # refid->offset...
            # TRAILER
            self._fp = fp
            # The fixed 32-byte trailer at EOF locates the offset table/root
            self._fp.seek(-32, os.SEEK_END)
            trailer = self._fp.read(32)
            if len(trailer) != 32:
                raise InvalidFileException()
            (
                offset_size, self._ref_size, num_objects, top_object,
                offset_table_offset
            ) = struct.unpack('>6xBBQQQ', trailer)
            self._fp.seek(offset_table_offset)
            self._object_offsets = self._read_ints(num_objects, offset_size)
            # Parsed objects are memoized here by refnum (also breaks cycles)
            self._objects = [_undefined] * num_objects
            return self._read_object(top_object)
        except (OSError, IndexError, struct.error, OverflowError,
                UnicodeDecodeError):
            raise InvalidFileException()
    def _get_size(self, tokenL):
        """ return the size of the next object."""
        if tokenL == 0xF:
            # Size didn't fit in the token nibble - the next byte's low two
            # bits give log2 of the width of the following size integer
            m = self._fp.read(1)[0]
            if not _check_py3():
                m = ord(m)
            m = m & 0x3
            s = 1 << m
            f = '>' + _BINARY_FORMAT[s]
            return struct.unpack(f, self._fp.read(s))[0]
        return tokenL
    def _read_ints(self, n, size):
        # Read n big-endian unsigned ints of the given byte width
        data = self._fp.read(size * n)
        if size in _BINARY_FORMAT:
            return struct.unpack('>' + _BINARY_FORMAT[size] * n, data)
        else:
            if not size or len(data) != size * n:
                raise InvalidFileException()
            # Odd widths (e.g. 3 bytes) - decode via hexlify so py2 works too
            return tuple(int(binascii.hexlify(data[i: i + size]),16)
                    for i in range(0, size * n, size))
            '''return tuple(int.from_bytes(data[i: i + size], 'big')
                    for i in range(0, size * n, size))'''
    def _read_refs(self, n):
        # Object references use the ref width declared in the trailer
        return self._read_ints(n, self._ref_size)
    def _read_object(self, ref):
        """
        read the object by reference.

        May recursively read sub-objects (content of an array/dict/set)
        """
        result = self._objects[ref]
        if result is not _undefined:
            return result
        offset = self._object_offsets[ref]
        self._fp.seek(offset)
        token = self._fp.read(1)[0]
        if not _check_py3():
            token = ord(token)
        # High nibble selects the type; low nibble is usually a size/length
        tokenH, tokenL = token & 0xF0, token & 0x0F
        if token == 0x00: # \x00 or 0x00
            result = None
        elif token == 0x08: # \x08 or 0x08
            result = False
        elif token == 0x09: # \x09 or 0x09
            result = True
        # The referenced source code also mentions URL (0x0c, 0x0d) and
        # UUID (0x0e), but neither can be generated using the Cocoa libraries.
        elif token == 0x0f: # \x0f or 0x0f
            result = b''
        elif tokenH == 0x10: # int
            result = int(binascii.hexlify(self._fp.read(1 << tokenL)),16)
            if tokenL >= 3: # Signed - adjust
                # Two's complement: clear the sign bit and subtract its weight
                result = result-(result & 1 << 2**tokenL*8-1)*2
        elif token == 0x22: # real
            result = struct.unpack('>f', self._fp.read(4))[0]
        elif token == 0x23: # real
            result = struct.unpack('>d', self._fp.read(8))[0]
        elif token == 0x33: # date
            f = struct.unpack('>d', self._fp.read(8))[0]
            # timestamp 0 of binary plists corresponds to 1/1/2001
            # (year of Mac OS X 10.0), instead of 1/1/1970.
            result = (datetime.datetime(2001, 1, 1) +
                datetime.timedelta(seconds=f))
        elif tokenH == 0x40: # data
            s = self._get_size(tokenL)
            if self._use_builtin_types or not hasattr(plistlib, "Data"):
                result = self._fp.read(s)
            else:
                result = plistlib.Data(self._fp.read(s))
        elif tokenH == 0x50: # ascii string
            s = self._get_size(tokenL)
            result = self._fp.read(s).decode('ascii')
            # (redundant self-assignment - no effect)
            result = result
        elif tokenH == 0x60: # unicode string
            # s counts UTF-16BE code units, hence s * 2 bytes
            s = self._get_size(tokenL)
            result = self._fp.read(s * 2).decode('utf-16be')
        elif tokenH == 0x80: # UID
            # used by Key-Archiver plist files
            result = UID(int(binascii.hexlify(self._fp.read(1 + tokenL)),16))
        elif tokenH == 0xA0: # array
            s = self._get_size(tokenL)
            obj_refs = self._read_refs(s)
            result = []
            # Memoize before recursing so self-referencing arrays terminate
            self._objects[ref] = result
            result.extend(self._read_object(x) for x in obj_refs)
        # tokenH == 0xB0 is documented as 'ordset', but is not actually
        # implemented in the Apple reference code.
        # tokenH == 0xC0 is documented as 'set', but sets cannot be used in
        # plists.
        elif tokenH == 0xD0: # dict
            # Layout: all key refs first, then all value refs
            s = self._get_size(tokenL)
            key_refs = self._read_refs(s)
            obj_refs = self._read_refs(s)
            result = self._dict_type()
            self._objects[ref] = result
            for k, o in zip(key_refs, obj_refs):
                key = self._read_object(k)
                if hasattr(plistlib, "Data") and isinstance(key, plistlib.Data):
                    # Keys must be hashable - unwrap Data to raw bytes
                    key = key.data
                result[key] = self._read_object(o)
        else:
            raise InvalidFileException()
        self._objects[ref] = result
        return result
def _count_to_size(count):
if count < 1 << 8:
return 1
elif count < 1 << 16:
return 2
elif count < 1 << 32:
return 4
else:
return 8
# Types deduplicated by value (not identity) when flattening for binary output
_scalars = (str, int, float, datetime.datetime, bytes)
class _BinaryPlistWriter (object):
    """Serialize an object tree to fp in Apple's bplist00 binary format."""
    def __init__(self, fp, sort_keys, skipkeys):
        self._fp = fp
        self._sort_keys = sort_keys
        self._skipkeys = skipkeys
    def write(self, value):
        """Write value (and every object it references) to the output file."""
        # Flattened object list:
        self._objlist = []
        # Mappings from object->objectid
        # First dict has (type(object), object) as the key,
        # second dict is used when object is not hashable and
        # has id(object) as the key.
        self._objtable = {}
        self._objidtable = {}
        # Create list of all objects in the plist
        self._flatten(value)
        # Size of object references in serialized containers
        # depends on the number of objects in the plist.
        num_objects = len(self._objlist)
        self._object_offsets = [0]*num_objects
        self._ref_size = _count_to_size(num_objects)
        self._ref_format = _BINARY_FORMAT[self._ref_size]
        # Write file header
        self._fp.write(b'bplist00')
        # Write object list
        for obj in self._objlist:
            self._write_object(obj)
        # Write refnum->object offset table
        top_object = self._getrefnum(value)
        offset_table_offset = self._fp.tell()
        offset_size = _count_to_size(offset_table_offset)
        offset_format = '>' + _BINARY_FORMAT[offset_size] * num_objects
        self._fp.write(struct.pack(offset_format, *self._object_offsets))
        # Write trailer
        sort_version = 0
        trailer = (
            sort_version, offset_size, self._ref_size, num_objects,
            top_object, offset_table_offset
        )
        self._fp.write(struct.pack('>5xBBBQQQ', *trailer))
    def _flatten(self, value):
        # Collect every reachable object into self._objlist exactly once,
        # assigning refnums in discovery order.
        # First check if the object is in the object table, not used for
        # containers to ensure that two subcontainers with the same contents
        # will be serialized as distinct values.
        if isinstance(value, _scalars):
            if (type(value), value) in self._objtable:
                return
        elif hasattr(plistlib, "Data") and isinstance(value, plistlib.Data):
            if (type(value.data), value.data) in self._objtable:
                return
        elif id(value) in self._objidtable:
            return
        # Add to objectreference map
        refnum = len(self._objlist)
        self._objlist.append(value)
        if isinstance(value, _scalars):
            self._objtable[(type(value), value)] = refnum
        elif hasattr(plistlib, "Data") and isinstance(value, plistlib.Data):
            self._objtable[(type(value.data), value.data)] = refnum
        else:
            self._objidtable[id(value)] = refnum
        # And finally recurse into containers
        if isinstance(value, dict):
            keys = []
            values = []
            items = value.items()
            if self._sort_keys:
                items = sorted(items)
            for k, v in items:
                if not isinstance(k, basestring):
                    if self._skipkeys:
                        continue
                    raise TypeError("keys must be strings")
                keys.append(k)
                values.append(v)
            # All keys first, then all values - matches the 0xD0 record layout
            for o in itertools.chain(keys, values):
                self._flatten(o)
        elif isinstance(value, (list, tuple)):
            for o in value:
                self._flatten(o)
    def _getrefnum(self, value):
        # Look up the refnum assigned to value during _flatten
        if isinstance(value, _scalars):
            return self._objtable[(type(value), value)]
        elif hasattr(plistlib, "Data") and isinstance(value, plistlib.Data):
            return self._objtable[(type(value.data), value.data)]
        else:
            return self._objidtable[id(value)]
    def _write_size(self, token, size):
        # Sizes < 15 ride in the token's low nibble; larger sizes follow as
        # an int marker byte (0x10..0x13 by width) plus the size itself.
        if size < 15:
            self._fp.write(struct.pack('>B', token | size))
        elif size < 1 << 8:
            self._fp.write(struct.pack('>BBB', token | 0xF, 0x10, size))
        elif size < 1 << 16:
            self._fp.write(struct.pack('>BBH', token | 0xF, 0x11, size))
        elif size < 1 << 32:
            self._fp.write(struct.pack('>BBL', token | 0xF, 0x12, size))
        else:
            self._fp.write(struct.pack('>BBQ', token | 0xF, 0x13, size))
    def _write_object(self, value):
        # Record this object's offset for the offset table, then emit it
        ref = self._getrefnum(value)
        self._object_offsets[ref] = self._fp.tell()
        if value is None:
            self._fp.write(b'\x00')
        elif value is False:
            self._fp.write(b'\x08')
        elif value is True:
            self._fp.write(b'\x09')
        elif isinstance(value, int):
            if value < 0:
                # Negative ints are always stored as signed 8-byte values
                try:
                    self._fp.write(struct.pack('>Bq', 0x13, value))
                except struct.error:
                    raise OverflowError(value) # from None
            elif value < 1 << 8:
                self._fp.write(struct.pack('>BB', 0x10, value))
            elif value < 1 << 16:
                self._fp.write(struct.pack('>BH', 0x11, value))
            elif value < 1 << 32:
                self._fp.write(struct.pack('>BL', 0x12, value))
            elif value < 1 << 63:
                self._fp.write(struct.pack('>BQ', 0x13, value))
            elif value < 1 << 64:
                # 16-byte (0x14) record built by hand so py2 longs work too
                self._fp.write(binascii.unhexlify("14"+hex(value)[2:].rstrip("L").rjust(32,"0")))
            else:
                raise OverflowError(value)
        elif isinstance(value, float):
            self._fp.write(struct.pack('>Bd', 0x23, value))
        elif isinstance(value, datetime.datetime):
            # Stored as seconds since the Apple epoch (2001-01-01)
            f = (value - datetime.datetime(2001, 1, 1)).total_seconds()
            self._fp.write(struct.pack('>Bd', 0x33, f))
        elif (_check_py3() and isinstance(value, (bytes, bytearray))) or (hasattr(plistlib, "Data") and isinstance(value, plistlib.Data)):
            if not isinstance(value, (bytes, bytearray)):
                value = value.data # Unpack it
            self._write_size(0x40, len(value))
            self._fp.write(value)
        elif isinstance(value, basestring):
            try:
                t = value.encode('ascii')
                self._write_size(0x50, len(value))
            except UnicodeEncodeError:
                # Non-ASCII - fall back to UTF-16BE; size is in code units
                t = value.encode('utf-16be')
                self._write_size(0x60, len(t) // 2)
            self._fp.write(t)
        elif isinstance(value, UID) or (hasattr(plistlib,"UID") and isinstance(value, plistlib.UID)):
            # struct packs the UID object itself via its __index__
            if value.data < 0:
                raise ValueError("UIDs must be positive")
            elif value.data < 1 << 8:
                self._fp.write(struct.pack('>BB', 0x80, value))
            elif value.data < 1 << 16:
                self._fp.write(struct.pack('>BH', 0x81, value))
            elif value.data < 1 << 32:
                self._fp.write(struct.pack('>BL', 0x83, value))
            # elif value.data < 1 << 64:
            #     self._fp.write(struct.pack('>BQ', 0x87, value))
            else:
                raise OverflowError(value)
        elif isinstance(value, (list, tuple)):
            refs = [self._getrefnum(o) for o in value]
            s = len(refs)
            self._write_size(0xA0, s)
            self._fp.write(struct.pack('>' + self._ref_format * s, *refs))
        elif isinstance(value, dict):
            # Emit all key refs, then all value refs (0xD0 record layout)
            keyRefs, valRefs = [], []
            if self._sort_keys:
                rootItems = sorted(value.items())
            else:
                rootItems = value.items()
            for k, v in rootItems:
                if not isinstance(k, basestring):
                    if self._skipkeys:
                        continue
                    raise TypeError("keys must be strings")
                keyRefs.append(self._getrefnum(k))
                valRefs.append(self._getrefnum(v))
            s = len(keyRefs)
            self._write_size(0xD0, s)
            self._fp.write(struct.pack('>' + self._ref_format * s, *keyRefs))
            self._fp.write(struct.pack('>' + self._ref_format * s, *valRefs))
        else:
            raise TypeError(value)

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,151 @@
import sys, subprocess, time, threading, shlex
try:
from Queue import Queue, Empty
except:
from queue import Queue, Empty
# True on POSIX systems - used to enable close_fds for streamed subprocesses
ON_POSIX = 'posix' in sys.builtin_module_names
class Run:
    """Helper for running shell commands, optionally streaming their output
    live, with py2/py3 compatibility."""
    def __init__(self):
        return
    def _read_output(self, pipe, q):
        # Worker-thread target: pump pipe one char at a time into queue q.
        # NOTE(review): the Popen below uses universal_newlines=True, so on
        # py3 pipe.read(1) returns str and never equals the b'' sentinel -
        # the loop only stops via the ValueError on pipe close. Verify.
        try:
            for line in iter(lambda: pipe.read(1), b''):
                q.put(line)
        except ValueError:
            # Pipe was closed out from under us - we're done
            pass
        pipe.close()
    def _create_thread(self, output):
        # Creates a new queue and thread object to watch based on the output pipe sent
        q = Queue()
        t = threading.Thread(target=self._read_output, args=(output, q))
        t.daemon = True
        return (q,t)
    def _stream_output(self, comm, shell = False):
        # Run comm, echoing stdout/stderr live as it arrives.
        # Returns (stdout_text, stderr_text, returncode).
        output = error = ""
        p = None
        try:
            # Normalize comm to match the shell mode requested
            # NOTE(review): shlex.quote is py3.3+ only (py2 had pipes.quote)
            if shell and type(comm) is list:
                comm = " ".join(shlex.quote(x) for x in comm)
            if not shell and type(comm) is str:
                comm = shlex.split(comm)
            p = subprocess.Popen(comm, shell=shell, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=0, universal_newlines=True, close_fds=ON_POSIX)
            # Setup the stdout thread/queue
            q,t = self._create_thread(p.stdout)
            qe,te = self._create_thread(p.stderr)
            # Start both threads
            t.start()
            te.start()
            while True:
                c = z = ""
                try: c = q.get_nowait()
                except Empty: pass
                else:
                    sys.stdout.write(c)
                    output += c
                    sys.stdout.flush()
                try: z = qe.get_nowait()
                except Empty: pass
                else:
                    sys.stderr.write(z)
                    error += z
                    sys.stderr.flush()
                if not c==z=="": continue # Keep going until empty
                # No output - see if still running
                p.poll()
                if p.returncode != None:
                    # Subprocess ended
                    break
                # No output, but subprocess still running - stall for 20ms
                time.sleep(0.02)
            # Drain anything still buffered after the process exits
            o, e = p.communicate()
            return (output+o, error+e, p.returncode)
        except:
            if p:
                try: o, e = p.communicate()
                except: o = e = ""
                return (output+o, error+e, p.returncode)
            return ("", "Command not found!", 1)
    def _decode(self, value, encoding="utf-8", errors="ignore"):
        # Helper method to only decode if bytes type
        if sys.version_info >= (3,0) and isinstance(value, bytes):
            return value.decode(encoding,errors)
        return value
    def _run_command(self, comm, shell = False):
        # Run comm to completion; returns (stdout, stderr, returncode).
        c = None
        try:
            if shell and type(comm) is list:
                comm = " ".join(shlex.quote(x) for x in comm)
            if not shell and type(comm) is str:
                comm = shlex.split(comm)
            p = subprocess.Popen(comm, shell=shell, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            c = p.communicate()
        except:
            # Popen/communicate never produced output - treat as missing command
            if c == None:
                return ("", "Command not found!", 1)
        return (self._decode(c[0]), self._decode(c[1]), p.returncode)
    def run(self, command_list, leave_on_fail = False):
        # Run one command dict, or a list of them, in order.
        # Recognized keys per dict: args, shell, stream, sudo, stdout,
        # stderr, message, show.  Returns a single (out, err, code) tuple
        # when exactly one command produced output, else a list of tuples.
        # Command list should be an array of dicts
        if type(command_list) is dict:
            # We only have one command
            command_list = [command_list]
        output_list = []
        for comm in command_list:
            args = comm.get("args", [])
            shell = comm.get("shell", False)
            stream = comm.get("stream", False)
            sudo = comm.get("sudo", False)
            stdout = comm.get("stdout", False)
            stderr = comm.get("stderr", False)
            mess = comm.get("message", None)
            show = comm.get("show", False)
            if not mess == None:
                print(mess)
            if not len(args):
                # nothing to process
                continue
            if sudo:
                # Check if we have sudo
                out = self._run_command(["which", "sudo"])
                if "sudo" in out[0]:
                    # Can sudo
                    # NOTE(review): insert() mutates the caller's args list in place
                    if type(args) is list:
                        args.insert(0, out[0].replace("\n", "")) # add to start of list
                    elif type(args) is str:
                        args = out[0].replace("\n", "") + " " + args # add to start of string
            if show:
                print(" ".join(args))
            if stream:
                # Stream it!
                out = self._stream_output(args, shell)
            else:
                # Just run and gather output
                out = self._run_command(args, shell)
            if stdout and len(out[0]):
                print(out[0])
            if stderr and len(out[1]):
                print(out[1])
            # Append output
            output_list.append(out)
            # Check for errors
            if leave_on_fail and out[2] != 0:
                # Got an error - leave
                break
        if len(output_list) == 1:
            # We only ran one command - just return that output
            return output_list[0]
        return output_list

View File

@@ -0,0 +1,280 @@
import sys, os, time, re, json, datetime, ctypes, subprocess
if os.name == "nt":
# Windows
import msvcrt
else:
# Not Windows \o/
import select
class Utils:
def __init__(self, name = "Python Script", interactive = True):
self.name = name
self.interactive = interactive
# Init our colors before we need to print anything
cwd = os.getcwd()
os.chdir(os.path.dirname(os.path.realpath(__file__)))
if os.path.exists("colors.json"):
self.colors_dict = json.load(open("colors.json"))
else:
self.colors_dict = {}
os.chdir(cwd)
def check_admin(self):
# Returns whether or not we're admin
try:
is_admin = os.getuid() == 0
except AttributeError:
is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
return is_admin
    def elevate(self, file):
        """Relaunch the passed script file with admin/root privileges.

        No-op when already elevated.  On Windows this spawns a new elevated
        interpreter via ShellExecuteW; elsewhere it replaces the current
        process with a sudo'd copy, exiting with status 1 on any failure.
        """
        if self.check_admin():
            return
        if os.name == "nt":
            ctypes.windll.shell32.ShellExecuteW(None, "runas", '"{}"'.format(sys.executable), '"{}"'.format(file), None, 1)
        else:
            try:
                # Locate sudo, then exec it in place of this process.
                # NOTE(review): argv is [sys.executable, 'python'] + sys.argv,
                # relying on execv's argv[0] convention - verify the intent
                p = subprocess.Popen(["which", "sudo"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                c = p.communicate()[0].decode("utf-8", "ignore").replace("\n", "")
                os.execv(c, [ sys.executable, 'python'] + sys.argv)
            except:
                exit(1)
def compare_versions(self, vers1, vers2, **kwargs):
# Helper method to compare ##.## strings
#
# vers1 < vers2 = True
# vers1 = vers2 = None
# vers1 > vers2 = False
# Sanitize the pads
pad = str(kwargs.get("pad", ""))
sep = str(kwargs.get("separator", "."))
ignore_case = kwargs.get("ignore_case", True)
# Cast as strings
vers1 = str(vers1)
vers2 = str(vers2)
if ignore_case:
vers1 = vers1.lower()
vers2 = vers2.lower()
# Split and pad lists
v1_parts, v2_parts = self.pad_length(vers1.split(sep), vers2.split(sep))
# Iterate and compare
for i in range(len(v1_parts)):
# Remove non-numeric
v1 = ''.join(c.lower() for c in v1_parts[i] if c.isalnum())
v2 = ''.join(c.lower() for c in v2_parts[i] if c.isalnum())
# Equalize the lengths
v1, v2 = self.pad_length(v1, v2)
# Compare
if str(v1) < str(v2):
return True
elif str(v1) > str(v2):
return False
# Never differed - return None, must be equal
return None
def pad_length(self, var1, var2, pad = "0"):
# Pads the vars on the left side to make them equal length
pad = "0" if len(str(pad)) < 1 else str(pad)[0]
if not type(var1) == type(var2):
# Type mismatch! Just return what we got
return (var1, var2)
if len(var1) < len(var2):
if type(var1) is list:
var1.extend([str(pad) for x in range(len(var2) - len(var1))])
else:
var1 = "{}{}".format((pad*(len(var2)-len(var1))), var1)
elif len(var2) < len(var1):
if type(var2) is list:
var2.extend([str(pad) for x in range(len(var1) - len(var2))])
else:
var2 = "{}{}".format((pad*(len(var1)-len(var2))), var2)
return (var1, var2)
def check_path(self, path):
# Let's loop until we either get a working path, or no changes
test_path = path
last_path = None
while True:
# Bail if we've looped at least once and the path didn't change
if last_path != None and last_path == test_path: return None
last_path = test_path
# Check if we stripped everything out
if not len(test_path): return None
# Check if we have a valid path
if os.path.exists(test_path):
return os.path.abspath(test_path)
# Check for quotes
if test_path[0] == test_path[-1] and test_path[0] in ('"',"'"):
test_path = test_path[1:-1]
continue
# Check for a tilde and expand if needed
if test_path[0] == "~":
tilde_expanded = os.path.expanduser(test_path)
if tilde_expanded != test_path:
# Got a change
test_path = tilde_expanded
continue
# Let's check for spaces - strip from the left first, then the right
if test_path[0] in (" ","\t"):
test_path = test_path[1:]
continue
if test_path[-1] in (" ","\t"):
test_path = test_path[:-1]
continue
# Maybe we have escapes to handle?
test_path = "\\".join([x.replace("\\", "") for x in test_path.split("\\\\")])
    def grab(self, prompt, **kwargs):
        # Takes a prompt, a default, and a timeout and shows it with that timeout
        # returning the result
        #
        # kwargs: timeout (seconds, <= 0 disables the timed path) and
        #         default (returned when non-interactive, on EOF, on an
        #         empty/timed-out response)
        timeout = kwargs.get("timeout",0)
        default = kwargs.get("default","")
        if not self.interactive:
            return default
        # If we don't have a timeout - then skip the timed sections
        if timeout <= 0:
            try:
                if sys.version_info >= (3, 0):
                    return input(prompt)
                else:
                    return str(raw_input(prompt))
            except EOFError:
                return default
        # Write our prompt
        sys.stdout.write(prompt)
        sys.stdout.flush()
        if os.name == "nt":
            # Windows: poll the keyboard with msvcrt, timing out only while
            # no characters have been typed yet
            start_time = time.time()
            i = ''
            while True:
                if msvcrt.kbhit():
                    c = msvcrt.getche()
                    if ord(c) == 13: # enter_key
                        break
                    elif ord(c) >= 32: # space_char
                        i += c.decode() if sys.version_info >= (3,0) and isinstance(c,bytes) else c
                else:
                    time.sleep(0.02) # Delay for 20ms to prevent CPU workload
                if len(i) == 0 and (time.time() - start_time) > timeout:
                    break
        else:
            # POSIX: wait for stdin to become readable, up to timeout seconds
            i, o, e = select.select( [sys.stdin], [], [], timeout )
            if i:
                i = sys.stdin.readline().strip()
        print('') # needed to move to next line
        # Empty input (or a timeout) falls back to the default
        if len(i) > 0:
            return i
        else:
            return default
def cls(self):
if not self.interactive:
return
if os.name == "nt":
os.system("cls")
elif os.environ.get("TERM"):
os.system("clear")
    def cprint(self, message, **kwargs):
        # Print message with color placeholder substitution, then reset the
        # terminal color.  With strip_colors (forced on Windows or when
        # non-interactive) the placeholders are removed and the cleaned
        # string is RETURNED instead of printed.
        #
        # NOTE(review): this iterates self.colors, but __init__ only sets
        # self.colors_dict - as written this raises AttributeError unless
        # something else assigns self.colors.  Confirm which name is intended.
        strip_colors = kwargs.get("strip_colors", False)
        if os.name == "nt" or not self.interactive:
            strip_colors = True
        reset = u"\u001b[0m"
        # Requires sys import
        for c in self.colors:
            if strip_colors:
                message = message.replace(c["find"], "")
            else:
                message = message.replace(c["find"], c["replace"])
        if strip_colors:
            return message
        sys.stdout.write(message)
        print(reset)
    # Needs work to resize the string if color chars exist
    '''# Header drawing method
    def head(self, text = None, width = 55):
        if text == None:
            text = self.name
        self.cls()
        print(" {}".format("#"*width))
        len_text = self.cprint(text, strip_colors=True)
        mid_len = int(round(width/2-len(len_text)/2)-2)
        middle = " #{}{}{}#".format(" "*mid_len, len_text, " "*((width - mid_len - len(len_text))-2))
        if len(middle) > width+1:
            # Get the difference
            di = len(middle) - width
            # Add the padding for the ...#
            di += 3
            # Trim the string
            middle = middle[:-di]
            newlen = len(middle)
            middle += "...#"
        find_list = [ c["find"] for c in self.colors ]
        # Translate colored string to len
        middle = middle.replace(len_text, text + self.rt_color) # always reset just in case
        self.cprint(middle)
        print("#"*width)'''
# Header drawing method
def head(self, text = None, width = 55):
if not self.interactive:
sys.stderr.write(str(text)+"\n")
sys.stderr.flush()
return
if text is None:
text = self.name
self.cls()
print(" {}".format("#"*width))
mid_len = int(round(width/2-len(text)/2)-2)
middle = " #{}{}{}#".format(" "*mid_len, text, " "*((width - mid_len - len(text))-2))
if len(middle) > width+1:
# Get the difference
di = len(middle) - width
# Add the padding for the ...#
di += 3
# Trim the string
middle = middle[:-di] + "...#"
print(middle)
print("#"*width)
print("")
def info(self, text):
if self.interactive:
print(text)
else:
sys.stderr.write(str(text)+"\n")
sys.stderr.flush()
def resize(self, width, height):
print('\033[8;{};{}t'.format(height, width))
    def custom_quit(self):
        """Print the sign-off banner with a time-appropriate farewell, then
        exit the process with status 0 (never returns)."""
        self.head()
        print("by CorpNewt\n")
        print("Thanks for testing it out, for bugs/comments/complaints")
        print("send me a message on Reddit, or check out my GitHub:\n")
        print("www.reddit.com/u/corpnewt")
        print("www.github.com/corpnewt\n")
        # Get the time and wish them a good morning, afternoon, evening, and night
        hr = datetime.datetime.now().time().hour
        if hr > 3 and hr < 12:
            print("Have a nice morning!\n\n")
        elif hr >= 12 and hr < 17:
            print("Have a nice afternoon!\n\n")
        elif hr >= 17 and hr < 21:
            print("Have a nice evening!\n\n")
        else:
            print("Have a nice night!\n\n")
        exit(0)