from __future__ import absolute_import
import cStringIO
import json
import logging
import lxml.html
import lxml.etree
from PIL import Image
from PIL.ExifTags import TAGS
import subprocess
import traceback
import urwid

import html2text

import netlib.utils
from netlib import odict

from . import common, signals
from .. import utils, encoding
from ..contrib import jsbeautifier
from ..contrib.wbxml.ASCommandResponse import ASCommandResponse

try:
    import pyamf
    from pyamf import remoting, flex
except ImportError:  # pragma nocover
    pyamf = None

try:
    import cssutils
except ImportError:  # pragma nocover
    cssutils = None
else:
    cssutils.log.setLevel(logging.CRITICAL)

    cssutils.ser.prefs.keepComments = True
    cssutils.ser.prefs.omitLastSemicolon = False
    cssutils.ser.prefs.indentClosingBrace = False
    cssutils.ser.prefs.validOnly = False


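# Default body-size cutoff, in bytes, passed to the views as ``limit``;
# anything beyond it is truncated and trailer() offers to load the rest.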
VIEW_CUTOFF = 1024 * 50


def _view_text(content, total, limit):
    """
        Generates a body for a chunk of text.
    """
    txt = []
    for i in netlib.utils.cleanBin(content).splitlines():
        txt.append(
            urwid.Text(("text", i), wrap="any")
        )
    trailer(total, txt, limit)
    return txt


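# Appends a "... N of data not shown" footer to ``txt`` when the full body
# (``clen`` bytes) is larger than ``limit``.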
def trailer(clen, txt, limit):
    rem = clen - limit
    if rem > 0:
        txt.append(urwid.Text(""))
        txt.append(
            urwid.Text(
                [
                    ("highlight", "... %s of data not shown. Press " % netlib.utils.pretty_size(rem)),
                    ("key", "f"),
                    ("highlight", " to load all data.")
                ]
            )
        )


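# Dispatches to a concrete view based on the Content-Type header, falling
# back to XML sniffing and finally to the raw view.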
class ViewAuto:
    name = "Auto"
    prompt = ("auto", "a")
    content_types = []

    def __call__(self, hdrs, content, limit):
        ctype = hdrs.get_first("content-type")
        if ctype:
            # Guard against unparseable Content-Type headers.
            ct = utils.parse_content_type(ctype)
            if ct:
                ct = "%s/%s" % (ct[0], ct[1])
                if ct in content_types_map:
                    return content_types_map[ct][0](hdrs, content, limit)
                elif utils.isXML(content):
                    return get("XML")(hdrs, content, limit)
        return get("Raw")(hdrs, content, limit)


class ViewRaw:
    name = "Raw"
    prompt = ("raw", "r")
    content_types = []

    def __call__(self, hdrs, content, limit):
        txt = _view_text(content[:limit], len(content), limit)
        return "Raw", txt


class ViewHex:
    name = "Hex"
    prompt = ("hex", "e")
    content_types = []

    def __call__(self, hdrs, content, limit):
        txt = []
        for offset, hexa, s in netlib.utils.hexdump(content[:limit]):
            txt.append(urwid.Text([
                ("offset", offset),
                " ",
                ("text", hexa),
                " ",
                ("text", s),
            ]))
        trailer(len(content), txt, limit)
        return "Hex", txt


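# Pretty-prints XML. Comments and processing instructions that precede the
# root element are folded into the doctype string so they survive
# re-serialisation.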
class ViewXML:
    name = "XML"
    prompt = ("xml", "x")
    content_types = ["text/xml"]

    def __call__(self, hdrs, content, limit):
        parser = lxml.etree.XMLParser(
            remove_blank_text=True,
            resolve_entities=False,
            strip_cdata=False,
            recover=False
        )
        try:
            document = lxml.etree.fromstring(content, parser)
        except lxml.etree.XMLSyntaxError:
            return None
        docinfo = document.getroottree().docinfo

        prev = []
        p = document.getroottree().getroot().getprevious()
        while p is not None:
            prev.insert(
                0,
                lxml.etree.tostring(p)
            )
            p = p.getprevious()
        doctype = docinfo.doctype
        if prev:
            doctype += "\n".join(prev).strip()
        doctype = doctype.strip()

        s = lxml.etree.tostring(
            document,
            pretty_print=True,
            xml_declaration=True,
            doctype=doctype or None,
            encoding=docinfo.encoding
        )

        txt = []
        for i in s[:limit].strip().split("\n"):
            txt.append(
                urwid.Text(("text", i))
            )
        trailer(len(content), txt, limit)
        return "XML-like data", txt


class ViewJSON:
    name = "JSON"
    prompt = ("json", "s")
    content_types = ["application/json"]

    def __call__(self, hdrs, content, limit):
        lines = utils.pretty_json(content)
        if lines:
            txt = []
            sofar = 0
            for i in lines:
                sofar += len(i)
                txt.append(
                    urwid.Text(("text", i))
                )
                if sofar > limit:
                    break
            trailer(sum(len(i) for i in lines), txt, limit)
            return "JSON", txt


class ViewHTML:
    name = "HTML"
    prompt = ("html", "h")
    content_types = ["text/html"]

    def __call__(self, hdrs, content, limit):
        if utils.isXML(content):
            parser = lxml.etree.HTMLParser(
                strip_cdata=True,
                remove_blank_text=True
            )
            d = lxml.html.fromstring(content, parser=parser)
            docinfo = d.getroottree().docinfo
            s = lxml.etree.tostring(
                d,
                pretty_print=True,
                doctype=docinfo.doctype
            )
            return "HTML", _view_text(s[:limit], len(s), limit)


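# Renders the page as a plain-text outline via html2text, which is often
# easier to scan than the full markup.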
class ViewHTMLOutline:
    name = "HTML Outline"
    prompt = ("html outline", "o")
    content_types = ["text/html"]

    def __call__(self, hdrs, content, limit):
        content = content.decode("utf-8")
        h = html2text.HTML2Text(baseurl="")
        h.ignore_images = True
        h.body_width = 0
        content = h.handle(content)
        txt = _view_text(content[:limit], len(content), limit)
        return "HTML Outline", txt


class ViewURLEncoded:
    name = "URL-encoded"
    prompt = ("urlencoded", "u")
    content_types = ["application/x-www-form-urlencoded"]

    def __call__(self, hdrs, content, limit):
        lines = netlib.utils.urldecode(content)
        if lines:
            body = common.format_keyvals(
                [(k + ":", v) for (k, v) in lines],
                key="header",
                val="text"
            )
            return "URLEncoded form", body


class ViewMultipart:
    name = "Multipart Form"
    prompt = ("multipart", "m")
    content_types = ["multipart/form-data"]

    def __call__(self, hdrs, content, limit):
        v = utils.multipartdecode(hdrs, content)
        if v:
            r = [
                urwid.Text(("highlight", "Form data:\n")),
            ]
            r.extend(common.format_keyvals(
                v,
                key="header",
                val="text"
            ))
            return "Multipart form", r


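# AMF support is optional and only set up when pyamf imports. Unknown typed
# objects are mapped to DummyObject so messages referencing classes we have
# no alias for can still be decoded and displayed.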
if pyamf:
    class DummyObject(dict):
        def __init__(self, alias):
            dict.__init__(self)

        def __readamf__(self, input):
            data = input.readObject()
            self["data"] = data

    def pyamf_class_loader(s):
        for i in pyamf.CLASS_LOADERS:
            if i != pyamf_class_loader:
                v = i(s)
                if v:
                    return v
        return DummyObject

    pyamf.register_class_loader(pyamf_class_loader)

    class ViewAMF:
        name = "AMF"
        prompt = ("amf", "f")
        content_types = ["application/x-amf"]

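        # Recursively converts decoded AMF structures into plain Python
        # containers for JSON dumping. ``seen`` tracks DummyObject ids so
        # self-referencing object graphs render as "<recursion>" instead of
        # looping forever.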
        def unpack(self, b, seen=None):
            # A fresh ``seen`` set per top-level call avoids the mutable
            # default-argument trap, where recursion markers would leak
            # between unrelated messages.
            if seen is None:
                seen = set()
            if hasattr(b, "body"):
                return self.unpack(b.body, seen)
            if isinstance(b, DummyObject):
                if id(b) in seen:
                    return "<recursion>"
                else:
                    seen.add(id(b))
                    for k, v in b.items():
                        b[k] = self.unpack(v, seen)
                    return b
            elif isinstance(b, dict):
                for k, v in b.items():
                    b[k] = self.unpack(v, seen)
                return b
            elif isinstance(b, list):
                return [self.unpack(i, seen) for i in b]
            elif isinstance(b, flex.ArrayCollection):
                return [self.unpack(i, seen) for i in b]
            else:
                return b

        def __call__(self, hdrs, content, limit):
            envelope = remoting.decode(content, strict=False)
            if not envelope:
                return None

            txt = []
            for target, message in iter(envelope):
                if isinstance(message, pyamf.remoting.Request):
                    txt.append(urwid.Text([
                        ("header", "Request: "),
                        ("text", str(target)),
                    ]))
                else:
                    txt.append(urwid.Text([
                        ("header", "Response: "),
                        ("text", "%s, code %s" % (target, message.status)),
                    ]))

                s = json.dumps(self.unpack(message), indent=4)
                txt.extend(_view_text(s[:limit], len(s), limit))

            return "AMF v%s" % envelope.amfVersion, txt


class ViewJavaScript:
    name = "JavaScript"
    prompt = ("javascript", "j")
    content_types = [
        "application/x-javascript",
        "application/javascript",
        "text/javascript"
    ]

    def __call__(self, hdrs, content, limit):
        opts = jsbeautifier.default_options()
        opts.indent_size = 2
        res = jsbeautifier.beautify(content[:limit], opts)
        return "JavaScript", _view_text(res, len(res), limit)


class ViewCSS:
    name = "CSS"
    prompt = ("css", "c")
    content_types = [
        "text/css"
    ]

    def __call__(self, hdrs, content, limit):
        if cssutils:
            sheet = cssutils.parseString(content)
            beautified = sheet.cssText
        else:
            beautified = content

        return "CSS", _view_text(beautified, len(beautified), limit)


class ViewImage:
    name = "Image"
    prompt = ("image", "i")
    content_types = [
        "image/png",
        "image/jpeg",
        "image/gif",
        "image/vnd.microsoft.icon",
        "image/x-icon",
    ]

    def __call__(self, hdrs, content, limit):
        try:
            img = Image.open(cStringIO.StringIO(content))
        except IOError:
            return None
        parts = [
            ("Format", str(img.format_description)),
            ("Size", "%s x %s px" % img.size),
            ("Mode", str(img.mode)),
        ]
        for i in sorted(img.info.keys()):
            if i != "exif":
                parts.append(
                    (str(i), str(img.info[i]))
                )
        if hasattr(img, "_getexif"):
            ex = img._getexif()
            if ex:
                for i in sorted(ex.keys()):
                    tag = TAGS.get(i, i)
                    parts.append(
                        (str(tag), str(ex[i]))
                    )
        clean = []
        for i in parts:
            clean.append(
                [netlib.utils.cleanBin(i[0]), netlib.utils.cleanBin(i[1])]
            )
        fmt = common.format_keyvals(
            clean,
            key="header",
            val="text"
        )
        return "%s image" % img.format, fmt


class ViewProtobuf:
    """Human friendly view of protocol buffers.

    The view uses the protoc compiler to decode the binary.
    """

    name = "Protocol Buffer"
    prompt = ("protobuf", "p")
    content_types = [
        "application/x-protobuf",
        "application/x-protobuffer",
    ]

    @staticmethod
    def is_available():
        try:
            p = subprocess.Popen(
                ["protoc", "--version"],
                stdout=subprocess.PIPE
            )
            out, _ = p.communicate()
            return out.startswith("libprotoc")
        except Exception:
            return False

    def decode_protobuf(self, content):
        # If Popen raises OSError, it will be caught in
        # get_content_view and we fall back to Raw.
        p = subprocess.Popen(
            ["protoc", "--decode_raw"],
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        out, err = p.communicate(input=content)
        if out:
            return out
        else:
            return err

    def __call__(self, hdrs, content, limit):
        decoded = self.decode_protobuf(content)
        txt = _view_text(decoded[:limit], len(decoded), limit)
        return "Protobuf", txt


class ViewWBXML:
    name = "WBXML"
    prompt = ("wbxml", "w")
    content_types = [
        "application/vnd.wap.wbxml",
        "application/vnd.ms-sync.wbxml"
    ]

    def __call__(self, hdrs, content, limit):
        try:
            parser = ASCommandResponse(content)
            parsedContent = parser.xmlString
            txt = _view_text(parsedContent, len(parsedContent), limit)
            return "WBXML", txt
        except Exception:
            return None


views = [
    ViewAuto(),
    ViewRaw(),
    ViewHex(),
    ViewJSON(),
    ViewXML(),
    ViewWBXML(),
    ViewHTML(),
    ViewHTMLOutline(),
    ViewJavaScript(),
    ViewCSS(),
    ViewURLEncoded(),
    ViewMultipart(),
    ViewImage(),
]
if pyamf:
    views.append(ViewAMF())

if ViewProtobuf.is_available():
    views.append(ViewProtobuf())


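# Maps each content type to the list of views that can render it; ViewAuto
# dispatches to the first view registered for a type.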
content_types_map = {}
for i in views:
    for ct in i.content_types:
        l = content_types_map.setdefault(ct, [])
        l.append(i)


view_prompts = [i.prompt for i in views]


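# Both lookups return None when nothing matches: get_by_shortcut() by the
# single-key prompt shortcut, get() by the view's display name.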
def get_by_shortcut(c):
    for i in views:
        if i.prompt[1] == c:
            return i


def get(name):
    for i in views:
        if i.name == name:
            return i


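# Entry point for rendering a message body: decodes any content-encoding,
# runs the selected view, and falls back to the raw view if the viewer
# fails or declines to render the content.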
def get_content_view(viewmode, hdrItems, content, limit, is_request):
    """
        Returns a (msg, body) tuple.
    """
    if not content:
        if is_request:
            return "No request content (press tab to view response)", ""
        else:
            return "No content", ""
    msg = []

    hdrs = odict.ODictCaseless([list(i) for i in hdrItems])

    enc = hdrs.get_first("content-encoding")
    if enc and enc != "identity":
        decoded = encoding.decode(enc, content)
        if decoded:
            content = decoded
            msg.append("[decoded %s]" % enc)
    try:
        ret = viewmode(hdrs, content, limit)
    # Third-party viewers can fail in unexpected ways...
    except Exception:
        s = traceback.format_exc()
        s = "Content viewer failed: \n" + s
        signals.add_event(s, "error")
        ret = None
    if not ret:
        ret = get("Raw")(hdrs, content, limit)
        msg.append("Couldn't parse: falling back to Raw")
    else:
        msg.append(ret[0])
    return " ".join(msg), ret[1]