Commit 618a69ce authored by Jonathan Currier

Add a unix socket transport interface

parent 6b8622a1
@@ -17,6 +17,11 @@ mboxd_SOURCES = \
mboxd_LINK = $(LINK)
if ENABLE_UNIX_SOCKET_TRANSPORT
mboxd_SOURCES += \
control_unix.c
endif
if HAVE_MBOX_TRANSPORT
mboxd_SOURCES += \
transport_mbox.c
@@ -32,6 +37,8 @@ endif
mboxd_LDFLAGS = $(LIBSYSTEMD_LIBS)
mboxd_CFLAGS = $(LIBSYSTEMD_CFLAGS)
include_HEADERS = $(srcdir)/include/hiomapd-socket.h
# MTD Backing storage
include mtd/Makefile.am.include
@@ -59,7 +66,9 @@ check_PROGRAMS =
XFAIL_TESTS =
AM_LIBS = $(CODE_COVERAGE_LIBS)
AM_CPPFLAGS = $(CODE_COVERAGE_CPPFLAGS) -UNDEBUG
AM_CPPFLAGS = $(CODE_COVERAGE_CPPFLAGS) -UNDEBUG -I$(srcdir)/include
AM_CFLAGS = $(CODE_COVERAGE_CFLAGS)
AM_CXXFLAGS = $(CODE_COVERAGE_CXXFLAGS)
......
@@ -45,6 +45,13 @@ AS_IF([test "x$enable_oe_sdk" == "xyes"],
AC_SUBST([OESDK_TESTCASE_FLAGS], [$testcase_flags])
)
AC_ARG_ENABLE([unix-socket-transport],
AS_HELP_STRING([--enable-unix-socket-transport], [Turn on unix socket control interface]))
AS_IF([test "x$enable_unix_socket_transport" == "xyes"],
AM_CONDITIONAL(ENABLE_UNIX_SOCKET_TRANSPORT, true),
AM_CONDITIONAL(ENABLE_UNIX_SOCKET_TRANSPORT, false))
AC_ARG_ENABLE([virtual-pnor],
AS_HELP_STRING([--enable-virtual-pnor], [Turn on virtual pnor])
[
......
#define _GNU_SOURCE
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>
#include <sys/epoll.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <errno.h>
#include "mboxd.h"
#include "common.h"
#include "protocol.h"
#include "hiomapd-socket.h"
struct ep_ctx;
struct hust_context;
struct ep_ops {
int (*work)(struct hust_context *, struct ep_ctx*, struct epoll_event*);
};
struct ep_ctx {
int fd;
struct ep_ops *ops;
};
static const struct transport_ops hust_ops;
struct hust_context {
struct mbox_context *context;
struct ops_container w;
int hustfd;
};
static inline struct hust_context*
container_to_uctx(struct ops_container *container)
{
return container_of(container, struct hust_context, w);
}
int husc_ping_hdlr(struct hust_context *, struct ep_ctx *, struct unix_hiomapd_request *);
/* transport handlers */
/* autogen'd with:
*
for type in reset getinfo getflashinfo closewindow markdirty flush ack erase flashcontrollost daemonready protocolreset windowreset; do
echo "static int hust_${type}_hdlr(struct hust_context *, struct ep_ctx *,
struct unix_hiomapd_request *);"
done
*
*/
static int hust_reset_hdlr(struct hust_context *, struct ep_ctx *,
struct unix_hiomapd_request *);
static int hust_getinfo_hdlr(struct hust_context *, struct ep_ctx *,
struct unix_hiomapd_request *);
static int hust_getflashinfo_hdlr(struct hust_context *, struct ep_ctx *,
struct unix_hiomapd_request *);
static int hust_closewindow_hdlr(struct hust_context *, struct ep_ctx *,
struct unix_hiomapd_request *);
static int hust_markdirty_hdlr(struct hust_context *, struct ep_ctx *,
struct unix_hiomapd_request *);
static int hust_flush_hdlr(struct hust_context *, struct ep_ctx *,
struct unix_hiomapd_request *);
static int hust_ack_hdlr(struct hust_context *, struct ep_ctx *,
struct unix_hiomapd_request *);
static int hust_erase_hdlr(struct hust_context *, struct ep_ctx *,
struct unix_hiomapd_request *);
static int hust_flashcontrollost_hdlr(struct hust_context *, struct ep_ctx *,
struct unix_hiomapd_request *);
static int hust_daemonready_hdlr(struct hust_context *, struct ep_ctx *,
struct unix_hiomapd_request *);
static int hust_protocolreset_hdlr(struct hust_context *, struct ep_ctx *,
struct unix_hiomapd_request *);
static int hust_windowreset_hdlr(struct hust_context *, struct ep_ctx *,
struct unix_hiomapd_request *);
static int hust_createreadwindow_hdlr(struct hust_context *, struct ep_ctx *,
struct unix_hiomapd_request *);
static int hust_createwritewindow_hdlr(struct hust_context *, struct ep_ctx *,
struct unix_hiomapd_request *);
struct {
int (*hdlr)(struct hust_context *, struct ep_ctx *, struct unix_hiomapd_request *);
} husc_dispatch_lut[] = {
[husc_ping] = { .hdlr = husc_ping_hdlr,},
[hust_createreadwindow] = { .hdlr = hust_createreadwindow_hdlr,},
[hust_createwritewindow] = { .hdlr = hust_createwritewindow_hdlr,},
[hust_reset] = { .hdlr = hust_reset_hdlr,},
[hust_getinfo] = { .hdlr = hust_getinfo_hdlr,},
[hust_getflashinfo] = { .hdlr = hust_getflashinfo_hdlr,},
[hust_closewindow] = { .hdlr = hust_closewindow_hdlr,},
[hust_markdirty] = { .hdlr = hust_markdirty_hdlr,},
[hust_flush] = { .hdlr = hust_flush_hdlr,},
[hust_ack] = { .hdlr = hust_ack_hdlr,},
[hust_erase] = { .hdlr = hust_erase_hdlr,},
[hust_flashcontrollost] = { .hdlr = hust_flashcontrollost_hdlr,},
[hust_daemonready] = { .hdlr = hust_daemonready_hdlr,},
[hust_protocolreset] = { .hdlr = hust_protocolreset_hdlr,},
[hust_windowreset] = { .hdlr = hust_windowreset_hdlr,},
};
int epoll_fd_work(struct hust_context *, struct ep_ctx*, struct epoll_event *);
int conn_fd_work(struct hust_context *, struct ep_ctx*, struct epoll_event *);
struct ep_ops listen_socket_ops = {
.work = epoll_fd_work,
};
struct ep_ops conn_ops = {
.work = conn_fd_work,
};
int transport_unix_dispatch(struct ops_container *container,
struct epoll_event *evt)
{
struct hust_context *uctx = container_to_uctx(container);
struct mbox_context *context = uctx->context;
static const size_t evp_count = 0x10;
/* Having more than one client at a time is rather unlikely,
 * so this is probably overkill */
struct epoll_event evps[0x10] = { 0 };
struct ep_ctx *ectx;
int count;
int i;
/* The first event should be an EPOLLOUT event, which means we need
 * to do some late setup. */
if (evt->events & EPOLLOUT)
{
struct epoll_event evx = {
.events = EPOLLIN,
.data = { .ptr = evt->data.ptr, },
};
MSG_DBG("Unix socket out event\n");
epoll_ctl(context->epollfd, EPOLL_CTL_MOD, uctx->hustfd, &evx);
return protocol_events_put(context, container);
}
if (!(evt->events & EPOLLIN))
{
MSG_ERR("Unix socket event, but it's an invalid type\n");
return -EINVAL;
}
MSG_DBG("Unix socket Event\n");
count = epoll_wait(uctx->hustfd, evps, evp_count, 0);
MSG_DBG("epoll_wait(): %d(%m)\n", count);
for (i = 0; i < count; i++)
{
int rc;
ectx = evps[i].data.ptr;
MSG_DBG("fd[%d].epoll_event.event: %08x\n", ectx->fd, evps[i].events);
/* NOTE: ectx might be freed after this call;
 * set it to NULL to be explicit about it. */
/* It's debatable whether we should forward this return code back,
 * since we're in a loop of non-interlocking things.
 * However, it probably doesn't matter because we are not a
 * high performance server (the thought of this needing high
 * throughput is horrifying). */
rc = ectx->ops->work(uctx, ectx, evps + i);
ectx = NULL;
}
return 0;
}
int init_unix(struct mbox_context *context, struct transport_ops *ops)
{
struct sockaddr_un sa = {
.sun_family = AF_UNIX,
.sun_path = "/var/run/hiomapd"
};
struct hust_context *uctx = calloc(1, sizeof(*uctx));
int usfd = socket(AF_UNIX, SOCK_SEQPACKET ,0);
int rc;
int epfd = epoll_create1(EPOLL_CLOEXEC);
struct epoll_event evp = {
.events = EPOLLIN,
};
struct ep_ctx *ectx;
if (!uctx) {
MSG_ERR("Failed to allocate unix socket transport context\n");
close(usfd);
close(epfd);
return -ENOMEM;
}
uctx->context = context;
uctx->w.ops = ops;
MSG_DBG("%s:%d\n", __func__, __LINE__);
/* Remove any existing socket file, otherwise bind() would fail */
unlink(sa.sun_path);
/* As a reminder: we use the void cast to promise the compiler:
 * 'yes, this is totally aligned correctly, so forget your knowledge
 * of its alignment requirement'
 */
if ((rc = bind(usfd, (struct sockaddr *)(void*)&sa, sizeof(sa))))
goto out_bind;
if ((rc = listen(usfd, 8)))
goto out_listen;
ectx = calloc(1, sizeof(*ectx));
if (!ectx) {
rc = -ENOMEM;
goto out_bind;
}
ectx->fd = usfd;
ectx->ops = &listen_socket_ops;
evp.data.ptr = ectx;
epoll_ctl(epfd, EPOLL_CTL_ADD, usfd, &evp);
uctx->hustfd = epfd;
/* Set as the default transport, but only
* if no other transports have registered */
if (!context->transport)
context->transport = &uctx->w;
register_event_fd(uctx->hustfd, context, &uctx->w);
return 0;
out_listen:
out_bind:
close(usfd);
close(epfd);
free(uctx);
return rc;
}
int epoll_fd_work(struct hust_context *uctx, struct ep_ctx* ectx, struct epoll_event *ev)
{
struct epoll_event evp = {
.events = EPOLLIN,
};
struct ep_ctx *nectx;
int new_fd;
new_fd = accept4(ectx->fd, NULL, NULL, SOCK_NONBLOCK | SOCK_CLOEXEC);
if (new_fd < 0)
return -errno;
/* Use a fresh context for the accepted connection rather than
 * clobbering the listening socket's ectx */
nectx = calloc(1, sizeof(*nectx));
if (!nectx) {
close(new_fd);
return -ENOMEM;
}
nectx->fd = new_fd;
nectx->ops = &conn_ops;
evp.data.ptr = nectx;
epoll_ctl(uctx->hustfd, EPOLL_CTL_ADD, new_fd, &evp);
return 0;
}
int conn_fd_work(struct hust_context *uctx, struct ep_ctx* ectx, struct epoll_event *ev)
{
MSG_DBG("%s:%d\n", __func__, __LINE__);
struct unix_hiomapd_request req = { 0 };
ssize_t readlen;
if ((ev->events & EPOLLIN) &&
(readlen = read(ectx->fd, &req, sizeof(req))))
{
MSG_DBG("Got request sized %08x\n", (uint32_t)readlen);
MSG_DBG("Got request: %08x\n", req.request);
if ((readlen >= 4) && (req.request < husx_api_limit) &&
(husc_dispatch_lut[req.request].hdlr))
{
int rc;
rc = husc_dispatch_lut[req.request].hdlr(uctx, ectx, &req);
if (rc)
MSG_DBG("WARNING: cmd %x return %x\n", req.request, rc);
return 0;
}
return -1;
}
if (ev->events & EPOLLHUP)
{
close(ectx->fd);
free(ectx);
return 0;
}
MSG_DBG("Got unhandled request: %08x\n", req.request);
return -1;
}
int husc_ping_hdlr(struct hust_context * uctx, struct ep_ctx * ectx,
struct unix_hiomapd_request *req)
{
/* TODO: echo a response back to the client */
return 0;
}
int hust_createreadwindow_hdlr(struct hust_context * uctx, struct ep_ctx *ectx,
struct unix_hiomapd_request *req)
{
struct mbox_context *context = uctx->context;
struct protocol_create_window io = { 0 };
int rc;
io.req.offset = req->args[0] & 0xffff;
io.req.size = req->args[1] & 0xffff;
MSG_DBG("%s:%d\n", __func__, __LINE__);
io.req.ro = true;
if ((rc = context->protocol->create_window(context, &io)) < 0)
return rc;
memset(req, 0, sizeof(*req));
req->request = 1;
req->args[0] = context->bmc_events;
req->args[2] = io.resp.lpc_address;
req->args[3] = io.resp.size;
req->args[4] = io.resp.offset;
req->args[1] = 4;
write(ectx->fd, req, sizeof(*req));
return rc;
}
int hust_createwritewindow_hdlr(struct hust_context * uctx, struct ep_ctx *ectx,
struct unix_hiomapd_request *req)
{
struct mbox_context *context = uctx->context;
struct protocol_create_window io = { 0 };
int rc;
io.req.offset = req->args[0] & 0xffff;
io.req.size = req->args[1] & 0xffff;
io.req.ro = false;
if ((rc = context->protocol->create_window(context, &io)) < 0)
return rc;
memset(req, 0, sizeof(*req));
req->request = 1;
req->args[0] = context->bmc_events;
req->args[2] = io.resp.lpc_address;
req->args[3] = io.resp.size;
req->args[4] = io.resp.offset;
req->args[1] = 4;
write(ectx->fd, req, sizeof(*req));
return rc;
}
static int hust_closewindow_hdlr(struct hust_context * uctx, struct ep_ctx *ectx,
struct unix_hiomapd_request *req)
{
struct mbox_context *context = uctx->context;
struct protocol_close io = { 0 };
int rc;
io.req.flags = req->args[0] & 0xff;
rc = context->protocol->close(context, &io);
memset(req, 0, sizeof(*req));
req->request = 1;
req->args[0] = context->bmc_events;
write(ectx->fd, req, sizeof(*req));
return rc;
}
static int hust_reset_hdlr(struct hust_context *uctx, struct ep_ctx *ectx,
struct unix_hiomapd_request *req)
{
struct mbox_context *context = uctx->context;
int rc;
if ((rc = context->protocol->reset(context)) < 0)
return rc;
memset(req, 0, sizeof(*req));
req->request = 1;
req->args[0] = context->bmc_events;
req->args[1] = 0;
write(ectx->fd, req, sizeof(*req));
return rc;
}
static int hust_getinfo_hdlr(struct hust_context *uctx, struct ep_ctx *ectx,
struct unix_hiomapd_request *req)
{
struct mbox_context *context = uctx->context;
struct protocol_get_info io;
int rc;
io.req.api_version = req->args[0];
if ((rc = context->protocol->get_info(context, &io)) < 0)
return rc;
/* We need to do this before sending a reply, since otherwise the
 * BMC event state would be poorly defined */
context->transport = &uctx->w;
protocol_events_set(context, context->bmc_events);
memset(req, 0, sizeof(*req));
req->request = 1;
req->args[0] = context->bmc_events;
req->args[2] = io.resp.api_version;
req->args[1] = 1;
if (io.resp.api_version >= 2)
{
req->args[3] = io.resp.v2.block_size_shift;
req->args[4] = io.resp.v2.timeout;
req->args[1] += 2;
}
write(ectx->fd, req, sizeof(*req));
return rc;
}
static int hust_getflashinfo_hdlr(struct hust_context *uctx, struct ep_ctx *ectx,
struct unix_hiomapd_request *req)
{
struct mbox_context *context = uctx->context;
struct protocol_get_flash_info io;
int rc;
if ((rc = context->protocol->get_flash_info(context, &io)) < 0)
return rc;
memset(req, 0, sizeof(*req));
req->request = 1;
req->args[0] = context->bmc_events;
req->args[2] = io.resp.v2.flash_size;
req->args[3] = io.resp.v2.erase_size;
req->args[1] = 2;
write(ectx->fd, req, sizeof(*req));
return rc;
}
static int hust_markdirty_hdlr(struct hust_context *uctx, struct ep_ctx *ectx,
struct unix_hiomapd_request *req)
{
struct mbox_context *context = uctx->context;
struct protocol_mark_dirty io;
int rc;
io.req.v2.offset = req->args[0];
io.req.v2.size = req->args[1];
if ((rc = context->protocol->mark_dirty(context, &io)) < 0)
return rc;
memset(req, 0, sizeof(*req));
req->request = 1;
req->args[0] = context->bmc_events;
write(ectx->fd, req, sizeof(*req));
return rc;
}
static int hust_flush_hdlr(struct hust_context *uctx, struct ep_ctx *ectx,
struct unix_hiomapd_request *req)
{
struct mbox_context *context = uctx->context;
int rc;
if ((rc = context->protocol->flush(context, NULL /* No args in v2 */)) < 0)
return rc;
memset(req, 0, sizeof(*req));
req->request = 1;
req->args[0] = context->bmc_events;
write(ectx->fd, req, sizeof(*req));
return rc;
}
static int hust_ack_hdlr(struct hust_context *uctx, struct ep_ctx *ectx,
struct unix_hiomapd_request *req)
{
struct mbox_context *context = uctx->context;
struct protocol_ack io;
int rc;
io.req.flags = req->args[0];
if ((rc = context->protocol->ack(context, &io)))
return rc;
memset(req, 0, sizeof(*req));
req->request = 1;
req->args[0] = context->bmc_events;
req->args[1] = 0;
write(ectx->fd, req, sizeof(*req));
return rc;
}
static int hust_erase_hdlr(struct hust_context *uctx, struct ep_ctx *ectx,
struct unix_hiomapd_request *req)
{
struct mbox_context *context = uctx->context;
struct protocol_erase io;
int rc;
io.req.offset = req->args[0];
io.req.size = req->args[1];
if ((rc = context->protocol->erase(context, &io)))
return rc;
memset(req, 0, sizeof(*req));
req->request = 1;
req->args[0] = context->bmc_events;
write(ectx->fd, req, sizeof(*req));
return rc;
}
/* Properties are not yet supported */
static int hust_flashcontrollost_hdlr(struct hust_context *uctx, struct ep_ctx *ectx,
struct unix_hiomapd_request *req)
{
return 0;
}
static int hust_daemonready_hdlr(struct hust_context *uctx, struct ep_ctx *ectx,
struct unix_hiomapd_request *req)
{
return 0;
}
static int hust_protocolreset_hdlr(struct hust_context *uctx, struct ep_ctx *ectx,
struct unix_hiomapd_request *req)
{
return 0;
}
static int hust_windowreset_hdlr(struct hust_context *uctx, struct ep_ctx *ectx,
struct unix_hiomapd_request *req)
{
return 0;
}
static int hust_put_events(struct ops_container *container, uint8_t mask)
{
/* ATM this is a no-op. In the real world we should probably
 * send out a 'response' packet to all the connected sockets...
 * however, ATM I've no mechanism to do that, and no idea what
 * the clients should do even if they receive such a message */
return 0;
}
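/*
 * Sketch only (not part of this patch): if the transport kept a list of
 * its accepted connection fds, broadcasting a BMC event change could look
 * roughly like the function below. The conn_fds/n_conns fields are
 * hypothetical and do not exist in struct hust_context above.
 */
#if 0
static void hust_broadcast_events(struct hust_context *uctx, uint8_t events)
{
	struct unix_hiomapd_request rsp = { 0 };
	size_t i;

	rsp.request = husx_response;
	rsp.args[0] = events;	/* current BMC event mask */
	rsp.args[1] = 0;	/* no additional args */

	for (i = 0; i < uctx->n_conns; i++)
		write(uctx->conn_fds[i], &rsp, sizeof(rsp));
}
#endif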
static int hust_set_events(struct ops_container *container, uint8_t events,
uint8_t mask)
{
/* dbus calls the same func as above for this
 * (except it sends in (events & mask)) */
return 0;
}
static int hust_clear_events(struct ops_container *container, uint8_t events,
uint8_t mask)
{
/* Same story as the other two */
return 0;
}
DECLARE_TRANSPORT_OPS(hust) = {
.init = init_unix,
.event = transport_unix_dispatch,
.fini = 0,
.put_events = hust_put_events,
.set_events = hust_set_events,
.clear_events = hust_clear_events,
};
#ifndef HIOMAPD_SOCKET_H
#define HIOMAPD_SOCKET_H
#include <stdint.h>
/* hiomapd unix socket cmd -> husc
 * hiomapd unix socket transport -> hust
 * Some of these are redundant or won't ever
 * be implemented, but they are here to mark what the only existing
 * interface provided. */
enum {
husc_invalid = 0,
/* Invalid as a request; only used to tag responses */
husx_response = 1,
/* control functions */
husc_ping,
husc_reset,
husc_kill,
husc_markflashmodified,
husc_suspend,
husc_resume,
husc_setbackend,
husc_daemonstate,
husc_lpcstate,
/* transport functions */
hust_reset,
hust_getinfo,
hust_getflashinfo,
hust_createreadwindow,
hust_createwritewindow,
hust_closewindow,
hust_markdirty,
hust_flush,
hust_ack,
hust_erase,
/*v3 options? */
hust_getflashname,
hust_lock,
/* Properties...
 * It may be simpler to just emit a properties byte
 * over the socket interface
 */
hust_flashcontrollost,
hust_daemonready,
hust_protocolreset,
hust_windowreset,
/* end of list marker */
husx_api_limit
};
/*
 * Each request type uses a different number of args.
 * Argument usage and placement are defined per request.
 */
struct unix_hiomapd_request {
uint32_t request;
uint32_t args[7];
};
/* request info blocks */
/* Arguments whose type is smaller than uint32_t are truncated down to size;
 * so, for example, a uint16_t in argument zero would only use the bottom
 * 16 bits, and the next argument would be in arg1.
 * Also note: these are all native-endian encoded.
 *
 * Responses: the first response argument is the status byte,
 * the second is the number of additional arguments (extraneous?)
 */
/* hust_createreadwindow
*
* Takes two arguments:
* arg[0]: window offset (uint16_t)
* arg[1]: window size (uint16_t)
*
* TODO: how exactly does hiomapd use these values.
* It looks like they are in units of 'page size'
*
*/
/* hust_createwritewindow
*
* Takes two arguments:
* arg[0]: window offset (uint16_t)
* arg[1]: window size (uint16_t)
*
* TODO: how exactly does hiomapd use these values.
* It looks like they are in units of 'page size'
*
*/
/* hust_ack
*
* Takes one argument:
* arg[0]: flags
*
*/
#endif
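For reference, a minimal client sketch (not part of this commit) that exercises the request/response layout documented above might look like the following. It assumes the socket path used by init_unix() ("/var/run/hiomapd"), issues a hust_getinfo request, and reads the reply as laid out by hust_getinfo_hdlr; error handling is abbreviated.

#include <stdio.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>
#include "hiomapd-socket.h"

int main(void)
{
	struct sockaddr_un sa = {
		.sun_family = AF_UNIX,
		.sun_path = "/var/run/hiomapd",
	};
	struct unix_hiomapd_request req = { 0 };
	int fd = socket(AF_UNIX, SOCK_SEQPACKET, 0);

	if (fd < 0 || connect(fd, (struct sockaddr *)(void *)&sa, sizeof(sa)))
		return 1;

	/* hust_getinfo: arg[0] is the requested API version */
	req.request = hust_getinfo;
	req.args[0] = 2;
	if (write(fd, &req, sizeof(req)) != sizeof(req))
		return 1;

	/* The daemon replies with husx_response; args[0] carries the BMC
	 * event mask, args[1] the number of additional args, args[2] the
	 * negotiated API version (per hust_getinfo_hdlr above). */
	if (read(fd, &req, sizeof(req)) != sizeof(req))
		return 1;
	printf("events=%02x nargs=%u api=%u\n",
	       req.args[0], req.args[1], req.args[2]);

	close(fd);
	return 0;
}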
@@ -44,7 +44,6 @@ enum api_version {
BMC_EVENT_FLASH_CTRL_LOST | \
BMC_EVENT_DAEMON_READY)
#define MAPS_FLASH (1 << 0)
#define MAPS_MEM (1 << 1)
#define STATE_SUSPENDED (1 << 7)
......
@@ -62,7 +62,7 @@ int protocol_events_set(struct mbox_context *context, uint8_t bmc_event)
context->bmc_events |= bmc_event;
return (context->transport) ?
context->transport->set_events(context, bmc_event, mask) :
context->transport->ops->set_events(context, bmc_event, mask) :
(mbox_log(LOG_ERR, "Called %s, but transport not assigned", __func__), -1);
}
......