Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
170 changes: 170 additions & 0 deletions auto.c
Original file line number Diff line number Diff line change
@@ -0,0 +1,170 @@
// SPDX-License-Identifier: BSD-3-Clause
/*
 * Copyright (c) 2026, Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * QDL_DEVICE_AUTO: meta-backend that defers transport selection to the
 * wait loop. Each 250 ms tick it runs both the libusb open attempt and
 * (on Windows) a QUD SetupAPI probe; whichever first reaches an EDL
 * device wins, and its concrete qdl_device is bound as the inner. All
 * subsequent qdl_read/write/close calls on the outer forward to the
 * inner.
 *
 * Selection is deliberately not decided upfront: a user who plugs the
 * cable in a moment after launch would otherwise silently end up on
 * the wrong transport. There is no timeout here: the loop waits
 * indefinitely, just like the libusb-only path used to.
 */

#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "qdl.h"

/* Outer device wrapper; forwards all I/O to the transport bound by auto_open(). */
struct qdl_device_auto {
	struct qdl_device base;		/* embedded outer device handed to callers */
	struct qdl_device *inner;	/* concrete transport; NULL until auto_open() binds one */
	long pending_chunk_size;	/* chunk size requested before a transport was bound */
	bool chunk_size_set;		/* true when pending_chunk_size holds a valid request */
};

/* Recover the wrapper from the embedded base member handed to callers. */
static struct qdl_device_auto *to_auto(struct qdl_device *qdl)
{
	struct qdl_device_auto *wrap;

	wrap = container_of(qdl, struct qdl_device_auto, base);
	return wrap;
}

/*
 * Adopt @inner as the concrete transport: inherit its payload limit and
 * replay any chunk-size request issued before the transport existed.
 */
static void auto_bind_inner(struct qdl_device_auto *wrap, struct qdl_device *inner)
{
	wrap->base.max_payload_size = inner->max_payload_size;
	wrap->inner = inner;

	if (wrap->chunk_size_set)
		inner->set_out_chunk_size(inner, wrap->pending_chunk_size);
}

/*
 * Wait indefinitely for an EDL device, racing the libusb path against
 * (on Windows) the QUD driver path every 250 ms. Whichever opens first
 * is bound as the inner transport; the loser is torn down.
 *
 * Returns 0 on success, -1 on initialization failure or fatal I/O error.
 */
static int auto_open(struct qdl_device *qdl, const char *serial)
{
	struct qdl_device_auto *wrap = to_auto(qdl);
	struct qdl_device *usb_dev;
#ifdef _WIN32
	/* NOTE(review): the _WIN32 guard can go once a Linux QUD driver exists. */
	struct qdl_device *qud_dev;
	int qud_count;
#endif
	int last_reported = -1;
	int visible;
	int ret;

	usb_dev = usb_init();
	if (!usb_dev)
		return -1;

#ifdef _WIN32
	qud_dev = qud_init();
	if (!qud_dev) {
		qdl_deinit(usb_dev);
		return -1;
	}
#endif

	while (true) {
		/* libusb attempt goes first; on success it becomes the inner. */
		ret = usb_open_attempt(usb_dev, serial, &visible);
		if (!ret) {
#ifdef _WIN32
			qdl_deinit(qud_dev);
#endif
			auto_bind_inner(wrap, usb_dev);
			return 0;
		}
		if (ret == -EIO)
			goto fail;

#ifdef _WIN32
		/*
		 * A QUD device showing up in SetupAPI, or a libusb device
		 * already held open by another driver (-EBUSY), makes the
		 * QUD path worth trying this tick.
		 */
		qud_count = qud_probe_present();
		if (ret == -EBUSY || qud_count > 0) {
			if (!qud_dev->open(qud_dev, serial)) {
				qdl_deinit(usb_dev);
				auto_bind_inner(wrap, qud_dev);
				return 0;
			}
		}
		visible += qud_count;
#endif

		/* Report only when the picture changes, not once per tick. */
		if (visible != last_reported) {
			if (!visible)
				ux_info("Waiting for EDL device\n");
			else if (serial)
				ux_info("%d EDL device(s) visible, none match serial \"%s\"\n",
					visible, serial);
			else
				ux_info("%d EDL device(s) visible, none could be opened\n",
					visible);
			last_reported = visible;
		}

		usleep(250000);
	}

fail:
	qdl_deinit(usb_dev);
#ifdef _WIN32
	qdl_deinit(qud_dev);
#endif
	return -1;
}

/* Forward a read to the bound transport; auto_open() must have succeeded. */
static int auto_read(struct qdl_device *qdl, void *buf, size_t len, unsigned int timeout)
{
	struct qdl_device_auto *wrap = to_auto(qdl);

	return wrap->inner->read(wrap->inner, buf, len, timeout);
}

/* Forward a write to the bound transport; auto_open() must have succeeded. */
static int auto_write(struct qdl_device *qdl, const void *buf, size_t len, unsigned int timeout)
{
	struct qdl_device_auto *wrap = to_auto(qdl);

	return wrap->inner->write(wrap->inner, buf, len, timeout);
}

static void auto_close(struct qdl_device *qdl)
{
struct qdl_device_auto *wrap = to_auto(qdl);

if (!wrap->inner)
return;
wrap->inner->close(wrap->inner);
qdl_deinit(wrap->inner);
wrap->inner = NULL;
}

/*
 * Set the outgoing chunk size. Before a transport is bound the request
 * is stashed so auto_bind_inner() can replay it; afterwards it goes
 * straight to the inner device.
 */
static void auto_set_out_chunk_size(struct qdl_device *qdl, long size)
{
	struct qdl_device_auto *wrap = to_auto(qdl);

	if (!wrap->inner) {
		wrap->pending_chunk_size = size;
		wrap->chunk_size_set = true;
		return;
	}

	wrap->inner->set_out_chunk_size(wrap->inner, size);
}

/* Payload limit advertised before a concrete transport is bound (1 MiB). */
#define AUTO_DEFAULT_MAX_PAYLOAD (1024 * 1024)

/*
 * Allocate the QDL_DEVICE_AUTO meta-backend.
 *
 * Returns the embedded qdl_device on success, NULL on allocation
 * failure. Transport selection is deferred to auto_open(); until then
 * max_payload_size holds a conservative placeholder that
 * auto_bind_inner() overwrites with the real transport's limit.
 */
struct qdl_device *auto_init(void)
{
	struct qdl_device_auto *wrap = calloc(1, sizeof(*wrap));

	if (!wrap)
		return NULL;

	wrap->base.dev_type = QDL_DEVICE_AUTO;
	wrap->base.open = auto_open;
	wrap->base.read = auto_read;
	wrap->base.write = auto_write;
	wrap->base.close = auto_close;
	wrap->base.set_out_chunk_size = auto_set_out_chunk_size;
	wrap->base.max_payload_size = AUTO_DEFAULT_MAX_PAYLOAD;

	return &wrap->base;
}
124 changes: 100 additions & 24 deletions firehose.c
Original file line number Diff line number Diff line change
Expand Up @@ -179,15 +179,75 @@ static int firehose_read(struct qdl_device *qdl, int timeout_ms,

ux_debug("FIREHOSE READ: %s\n", buf);

node = firehose_response_parse(buf, n, &error);
if (!node)
return error;
/*
* On stream-oriented transports (Windows COM port via the
* QDLoader driver, virtio-console, ...) a single read can
* deliver multiple back-to-back Firehose responses
* concatenated, since the driver doesn't preserve USB bulk-
* transfer boundaries. Walk the buffer using the "<?xml" ...
* "</data>" envelope to bound each message; the closing tag
* is what really delimits the document so that any rawmode
* binary payload that arrives spliced onto the same read
* doesn't end up fed into libxml2 as if it were XML.
*
* libusb preserves transfer boundaries, so on that path each
* read still contains exactly one document and the loop runs
* once.
*/
char *cursor = buf;
char *bufend = buf + n;

ret = response_parser(node, data, &rawmode);
xmlFreeDoc(node->doc);
while (cursor < bufend) {
char *start = strstr(cursor, "<?xml");
char *xml_end;
size_t chunk;

if (ret >= 0)
resp = ret;
if (!start)
break;

/*
* Bound the XML on the closing </data> tag. If it's
* missing the message was either truncated or doesn't
* fit the schema we know how to parse; hand the rest
* of the buffer to libxml2 and let it error out
* gracefully.
*/
xml_end = strstr(start, "</data>");
if (xml_end) {
xml_end += sizeof("</data>") - 1;
chunk = (size_t)(xml_end - start);
} else {
chunk = (size_t)(bufend - start);
}

node = firehose_response_parse(start, chunk, &error);
if (!node)
return error;

ret = response_parser(node, data, &rawmode);
xmlFreeDoc(node->doc);

if (ret >= 0)
resp = ret;

cursor = start + chunk;

/*
* The response we just parsed told the host to switch
* to raw mode (e.g. the ACK that precedes the binary
* sectors of a <read>). On a stream transport the
* first chunk of that binary payload can have arrived
* tacked onto this same read. Push it back so the
* next qdl_read() picks it up before the transport
* is touched again.
*/
if (rawmode) {
if (cursor < bufend)
qdl_push_back(qdl, cursor,
(size_t)(bufend - cursor));
break;
}
}

if (rawmode)
break;
Expand Down Expand Up @@ -699,31 +759,47 @@ static int firehose_issue_read(struct qdl_device *qdl, struct firehose_op *read_

left = read_op->num_sectors;
while (left > 0) {
chunk_size = MIN(qdl->max_payload_size / sector_size, left);
size_t want;
size_t got;

n = qdl_read(qdl, buf, chunk_size * sector_size, 30000);
if (n < 0) {
ux_err("failed to read sector data\n");
ret = -1;
goto out;
}
chunk_size = MIN(qdl->max_payload_size / sector_size, left);
want = chunk_size * sector_size;

if ((size_t)n != chunk_size * sector_size) {
ux_err("failed to read full sector\n");
ret = -1;
goto out;
/*
* Accumulate the chunk across qdl_read() calls. libusb usually
* delivers an entire bulk transfer in one shot, but stream
* transports (QUD's Windows COM port, virtio-console, ...) can
* fragment it - including the rawmode tail that firehose_read()
* pushed back from the same buffer as the ACK response.
*/
got = 0;
while (got < want) {
n = qdl_read(qdl, (char *)buf + got, want - got, 30000);
if (n < 0) {
ux_err("failed to read sector data\n");
ret = -1;
goto out;
}
if (n == 0) {
ux_err("unexpected EOF while reading sector data\n");
ret = -1;
goto out;
}
got += (size_t)n;
}

if (out_buf) {
if ((size_t)n > out_len - out_offset)
n = out_len - out_offset;
size_t copy = want;

if (copy > out_len - out_offset)
copy = out_len - out_offset;

memcpy(out_buf + out_offset, buf, n);
out_offset += n;
memcpy((char *)out_buf + out_offset, buf, copy);
out_offset += copy;
} else {
n = write(fd, buf, n);
n = write(fd, buf, want);

if (n < 0 || (size_t)n != chunk_size * sector_size) {
if (n < 0 || (size_t)n != want) {
ux_err("failed to write sector data\n");
ret = -1;
goto out;
Expand Down
Loading
Loading