from __future__ import (absolute_import, print_function, division)
import os.path
import cgi
import urllib
import urlparse
import string
import re


def isascii(s):
    try:
        s.decode("ascii")
    except ValueError:
        return False
    return True


# best way to do it in python 2.x
def bytes_to_int(i):
    return int(i.encode('hex'), 16)
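

# Example (illustrative, not part of the original module; assumes Python 2
# byte-string semantics):
#   >>> bytes_to_int("\x01\x02")
#   258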


def cleanBin(s, fixspacing=False):
    """
        Cleans binary data to make it safe to display. If fixspacing is False,
        tabs and newlines are preserved; if it is True, they are replaced with
        a placeholder like every other non-printable character.
    """
    parts = []
    for i in s:
        o = ord(i)
        if (o > 31 and o < 127):
            parts.append(i)
        elif i in "\n\t" and not fixspacing:
            parts.append(i)
        else:
            parts.append(".")
    return "".join(parts)


def hexdump(s):
    """
        Returns a list of (offset, hex, str) tuples, one per 16-byte chunk
        of s.
    """
    parts = []
    for i in range(0, len(s), 16):
        o = "%.10x" % i
        part = s[i:i + 16]
        x = " ".join("%.2x" % ord(i) for i in part)
        if len(part) < 16:
            x += " "
            x += " ".join("  " for i in range(16 - len(part)))
        parts.append(
            (o, x, cleanBin(part, True))
        )
    return parts
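

# Example (illustrative, not part of the original module):
#   >>> offset, hexa, text = hexdump("foobar")[0]
#   >>> offset
#   '0000000000'
#   >>> text
#   'foobar'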


def setbit(byte, offset, value):
    """
        Set a bit in a byte to 1 if value is truthy, 0 if not.
    """
    if value:
        return byte | (1 << offset)
    else:
        return byte & ~(1 << offset)


def getbit(byte, offset):
    mask = 1 << offset
    if byte & mask:
        return True
    return False
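

# Example (illustrative, not part of the original module):
#   >>> setbit(0b1000, 1, True)
#   10
#   >>> setbit(0b1010, 1, False)
#   8
#   >>> getbit(0b1010, 1)
#   True
#   >>> getbit(0b1010, 0)
#   False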


class BiDi(object):

    """
        A wee utility class for keeping bi-directional mappings, like field
        constants in protocols. Names are attributes on the object, dict-like
        access maps values to names:

            CONST = BiDi(a=1, b=2)
            assert CONST.a == 1
            assert CONST.get_name(1) == "a"
    """

    def __init__(self, **kwargs):
        self.names = kwargs
        self.values = {}
        for k, v in kwargs.items():
            self.values[v] = k
        if len(self.names) != len(self.values):
            raise ValueError("Duplicate values not allowed.")

    def __getattr__(self, k):
        if k in self.names:
            return self.names[k]
        raise AttributeError("No such attribute: %s" % k)

    def get_name(self, n, default=None):
        return self.values.get(n, default)


def pretty_size(size):
    suffixes = [
        ("B", 2 ** 10),
        ("kB", 2 ** 20),
        ("MB", 2 ** 30),
    ]
    for suf, lim in suffixes:
        if size >= lim:
            continue
        else:
            x = round(size / float(lim / 2 ** 10), 2)
            if x == int(x):
                x = int(x)
            return str(x) + suf
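

# Example (illustrative, not part of the original module). Note that sizes of
# 1GB or more fall off the end of the suffix table and return None.
#   >>> pretty_size(100)
#   '100B'
#   >>> pretty_size(1024)
#   '1kB'
#   >>> pretty_size(1572864)
#   '1.5MB'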


class Data(object):

    def __init__(self, name):
        m = __import__(name)
        dirname, _ = os.path.split(m.__file__)
        self.dirname = os.path.abspath(dirname)

    def path(self, path):
        """
            Returns a path to the package data housed at 'path' under this
            module. Path can be a path to a file, or to a directory.

            This function will raise ValueError if the path does not exist.
        """
        fullpath = os.path.join(self.dirname, '../test/', path)
        if not os.path.exists(fullpath):
            raise ValueError("dataPath: %s does not exist." % fullpath)
        return fullpath
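

# Example (illustrative, not part of the original module; "netlib" and the file
# name are hypothetical): Data("netlib").path("data/example.txt") returns the
# full path of that file under the '../test/' directory beside the imported
# package, or raises ValueError if it does not exist.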


def is_valid_port(port):
    if not 0 <= port <= 65535:
        return False
    return True


def is_valid_host(host):
    try:
        host.decode("idna")
    except ValueError:
        return False
    if "\0" in host:
        return False
    return True


def parse_url(url):
    """
        Returns a (scheme, host, port, path) tuple, or None on error.

        Checks that:
            port is an integer 0-65535
            host is a valid IDNA-encoded hostname with no null-bytes
            path is valid ASCII
    """
    try:
        scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
    except ValueError:
        return None
    if not scheme:
        return None
    if '@' in netloc:
        # FIXME: Consider what to do with the discarded credentials here. Most
        # probably we should extend the signature to return these as a
        # separate value.
        _, netloc = string.rsplit(netloc, '@', maxsplit=1)
    if ':' in netloc:
        host, port = string.rsplit(netloc, ':', maxsplit=1)
        try:
            port = int(port)
        except ValueError:
            return None
    else:
        host = netloc
        if scheme == "https":
            port = 443
        else:
            port = 80
    path = urlparse.urlunparse(('', '', path, params, query, fragment))
    if not path.startswith("/"):
        path = "/" + path
    if not is_valid_host(host):
        return None
    if not isascii(path):
        return None
    if not is_valid_port(port):
        return None
    return scheme, host, port, path
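

# Example (illustrative, not part of the original module):
#   >>> parse_url("http://foo.com:8888/test")
#   ('http', 'foo.com', 8888, '/test')
#   >>> parse_url("https://foo.com")
#   ('https', 'foo.com', 443, '/')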


def get_header_tokens(headers, key):
    """
        Retrieve all tokens for a header key. A number of different headers
        follow a pattern where each header line can contain comma-separated
        tokens, and headers can be set multiple times.
    """
    toks = []
    for i in headers[key]:
        for j in i.split(","):
            toks.append(j.strip())
    return toks
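

# Example (illustrative, not part of the original module; assumes `headers` is
# an ODict-style object where headers[key] yields the list of raw values for
# that key): if headers["connection"] is ["keep-alive, Upgrade"], this returns
# ["keep-alive", "Upgrade"].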


def hostport(scheme, host, port):
    """
        Returns the host component, with a port specification if needed.
    """
    if (port, scheme) in [(80, "http"), (443, "https")]:
        return host
    else:
        return "%s:%s" % (host, port)


def unparse_url(scheme, host, port, path=""):
    """
        Returns a URL string, constructed from the specified components.
    """
    return "%s://%s%s" % (scheme, hostport(scheme, host, port), path)


def urlencode(s):
    """
        Takes a list of (key, value) tuples and returns a urlencoded string.
    """
    s = [tuple(i) for i in s]
    return urllib.urlencode(s, False)
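

# Example (illustrative, not part of the original module):
#   >>> urlencode([("a", "1"), ("b", "2 3")])
#   'a=1&b=2+3'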


def urldecode(s):
    """
        Takes a urlencoded string and returns a list of (key, value) tuples.
    """
    return cgi.parse_qsl(s, keep_blank_values=True)
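

# Example (illustrative, not part of the original module):
#   >>> urldecode("a=1&b=2+3&c=")
#   [('a', '1'), ('b', '2 3'), ('c', '')]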


def parse_content_type(c):
    """
        A simple parser for content-type values. Returns a (type, subtype,
        parameters) tuple, where type and subtype are strings, and parameters
        is a dict. If the string could not be parsed, return None.

        E.g. the following string:

            text/html; charset=UTF-8

        Returns:

            ("text", "html", {"charset": "UTF-8"})
    """
    parts = c.split(";", 1)
    ts = parts[0].split("/", 1)
    if len(ts) != 2:
        return None
    d = {}
    if len(parts) == 2:
        for i in parts[1].split(";"):
            clause = i.split("=", 1)
            if len(clause) == 2:
                d[clause[0].strip()] = clause[1].strip()
    return ts[0].lower(), ts[1].lower(), d
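

# Illustrative edge case (not part of the original module): values without a
# "/" cannot be parsed.
#   >>> parse_content_type("gibberish") is None
#   True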


def multipartdecode(hdrs, content):
    """
        Takes a multipart-encoded string and returns a list of (key, value)
        tuples.
    """
    v = hdrs.get_first("content-type")
    if v:
        v = parse_content_type(v)
        if not v:
            return []
        boundary = v[2].get("boundary")
        if not boundary:
            return []

        rx = re.compile(r'\bname="([^"]+)"')
        r = []

        for i in content.split("--" + boundary):
            parts = i.splitlines()
            if len(parts) > 1 and parts[0][0:2] != "--":
                match = rx.search(parts[1])
                if match:
                    key = match.group(1)
                    value = "".join(parts[3 + parts[2:].index(""):])
                    r.append((key, value))
        return r
    return []
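

# Example (illustrative, not part of the original module; assumes `hdrs` is an
# ODict-style object whose get_first("content-type") returns something like
# 'multipart/form-data; boundary=heboundary'): for a content string of
# '--heboundary\nContent-Disposition: form-data; name="field1"\n\nvalue1\n--heboundary--'
# this returns [('field1', 'value1')].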