Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions lglaf.lua
Original file line number Diff line number Diff line change
Expand Up @@ -38,9 +38,9 @@ function lglaf.dissector(tvb, pinfo, tree)
local endpoint = usb_endpoint().value

-- Process only bulk packets from the device (EP 5) and to the device (EP 3)
if not ((endpoint == 0x85 or endpoint == 3) and transfer_type == 3) then
return 0
end
-- if not ((endpoint == 0x85 or endpoint == 3) and transfer_type == 3) then
-- return 0
-- end
Copy link
Owner

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes this is unfortunate, I had to do the same to test your capture (actually, the bulk transfer type check can stay).

Maybe I have to detect this earlier or turn it into a pref (or drop the EP number check).


pinfo.cols.protocol = lglaf.name

Expand Down
2 changes: 1 addition & 1 deletion lglaf.py
Original file line number Diff line number Diff line change
Expand Up @@ -195,7 +195,7 @@ def close(self):
class USBCommunication(Communication):
VENDOR_ID_LG = 0x1004
# Read timeout. Set to 0 to disable timeouts
READ_TIMEOUT_MS = 60000
READ_TIMEOUT_MS = 6000
def __init__(self):
super(USBCommunication, self).__init__()
# Match device using heuristics on the interface/endpoint descriptors,
Expand Down
44 changes: 38 additions & 6 deletions partitions.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,9 @@
from __future__ import print_function
from collections import OrderedDict
from contextlib import closing, contextmanager
import argparse, logging, os, struct, sys
import argparse, logging, os, struct, sys, time
import lglaf
import usb.core, usb.util

_logger = logging.getLogger("partitions")

Expand All @@ -32,7 +33,7 @@ def get_partitions(comm):
Maps partition labels (such as "recovery") to block devices (such as
"mmcblk0p0"), sorted by the number in the block device.
"""
name_cmd = 'ls -l /dev/block/bootdevice/by-name'
name_cmd = 'ls -l /dev/block/platform/msm_sdcc.1/by-name'
output = comm.call(lglaf.make_exec_request(name_cmd))[1]
output = output.strip().decode('ascii')
names = []
Expand Down Expand Up @@ -80,11 +81,42 @@ def laf_open_disk(comm):
def laf_read(comm, fd_num, offset, size):
    """Read size bytes at the given block offset.

    Retries the READ request up to three times, attempting to recover from
    two known USB failure modes:
      - "Overflow": the endpoint has stale data; reset and drain it.
      - "Operation timed out": the device stalled; reconnect and re-open the
        file, which yields a fresh fd.

    Because recovery may re-open the file descriptor, the (possibly new) fd
    is returned alongside the data so callers can keep using it.

    :param comm: active Communication object (USB transport) — project type.
    :param fd_num: LAF file descriptor obtained from an OPEN request.
    :param offset: block offset to read at.
    :param size: number of bytes to read.
    :returns: tuple (response bytes, fd_num) where fd_num may differ from
        the input if recovery re-opened the device.
    :raises usb.core.USBError: for unrecognized USB errors, or when all
        retry attempts are exhausted.
    """
    read_cmd = lglaf.make_request(b'READ', args=[fd_num, offset, size])
    for attempt in range(3):
        try:
            header, response = comm.call(read_cmd)
            break
        except usb.core.USBError as e:
            if e.strerror == 'Overflow':
                _logger.debug("Overflow on READ %d %d %d", fd_num, offset, size)
                # Reset the device and drain any stale data left on the
                # endpoint so the next READ starts from a clean state.
                for reset_attempt in range(3):
                    try:
                        comm.reset()
                        comm._read(-1)  # clear line
                        break
                    except usb.core.USBError:
                        _logger.debug("Reset attempt %d failed", reset_attempt)
                continue
            elif e.strerror == 'Operation timed out':
                _logger.debug("Timeout on READ %d %d %d", fd_num, offset, size)
                # NOTE(review): comm.__init__() is reused as a "reconnect"
                # after close() — fragile but intentional here; confirm the
                # Communication subclass tolerates re-initialization.
                comm.close()
                time.sleep(3)
                comm.__init__()
                try:
                    lglaf.try_hello(comm)
                except usb.core.USBError:
                    _logger.debug("HELO after reconnect failed; continuing")
                # The old fd is stale after a reconnect: close it, re-open
                # the device and rebuild the READ request with the new fd.
                close_cmd = lglaf.make_request(b'CLSE', args=[fd_num])
                comm.call(close_cmd)
                open_cmd = lglaf.make_request(b'OPEN', body=b'\0')
                open_header = comm.call(open_cmd)[0]
                fd_num = read_uint32(open_header, 4)
                read_cmd = lglaf.make_request(b'READ', args=[fd_num, offset, size])
                continue
            else:
                raise  # rethrow unknown USB errors
    else:
        # All retries consumed without a successful READ; previously this
        # fell through to a NameError on `header`. Fail explicitly instead.
        raise RuntimeError("READ failed after 3 attempts "
                           "(fd=%d offset=%d size=%d)" % (fd_num, offset, size))

    # Ensure that response fd, offset and length are sane (match the request)
    assert read_cmd[4:4+12] == header[4:4+12], "Unexpected read response"
    assert len(response) == size
    return response, fd_num

def laf_erase(comm, fd_num, sector_start, sector_count):
"""TRIM some sectors."""
Expand Down Expand Up @@ -155,13 +187,13 @@ def dump_partition(comm, disk_fd, local_path, part_offset, part_size):
# whole block and drop the leading bytes.
if unaligned_bytes:
chunksize = min(end_offset - read_offset, BLOCK_SIZE)
data = laf_read(comm, disk_fd, read_offset // BLOCK_SIZE, chunksize)
data, disk_fd = laf_read(comm, disk_fd, read_offset // BLOCK_SIZE, chunksize)
f.write(data[unaligned_bytes:])
read_offset += BLOCK_SIZE

while read_offset < end_offset:
chunksize = min(end_offset - read_offset, BLOCK_SIZE * MAX_BLOCK_SIZE)
data = laf_read(comm, disk_fd, read_offset // BLOCK_SIZE, chunksize)
data, disk_fd = laf_read(comm, disk_fd, read_offset // BLOCK_SIZE, chunksize)
f.write(data)
read_offset += chunksize
_logger.info("Wrote %d bytes to %s", part_size, local_path)
Expand Down