This commit is contained in:
2026-04-10 15:45:53 +04:00
commit 95f3f072da
24 changed files with 3526 additions and 0 deletions
@@ -0,0 +1,11 @@
--- a/drivers/net/wireless/ath/ath12k/core.c
+++ b/drivers/net/wireless/ath/ath12k/core.c
@@ -77,6 +77,8 @@ static int ath12k_core_rfkill_config(struct ath12k_base *ab)
if (!(ab->target_caps.sys_cap_info & WMI_SYS_CAP_INFO_RFKILL))
return 0;
+ return 0;
+
if (ath12k_acpi_get_disable_rfkill(ab))
return 0;
+9
View File
@@ -0,0 +1,9 @@
# Out-of-tree kbuild wrapper for the cpu_parking module.
obj-m += cpu_parking.o

KDIR ?= /lib/modules/$(shell uname -r)/build

# Declare phony targets so stray files named "all"/"clean" cannot shadow them.
.PHONY: all clean

all:
	$(MAKE) -C $(KDIR) M=$(PWD) modules

clean:
	$(MAKE) -C $(KDIR) M=$(PWD) clean
+215
View File
@@ -0,0 +1,215 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* CPU core parking for Snapdragon X Elite.
*/
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#define DRIVER_NAME "cpu_parking"
/* Upper bound on CPU indices tracked by the prev_idle/prev_wall arrays. */
#define MAX_CPUS 16

/* "enabled" uses a custom setter so a 0 -> 1 write can restart the poller. */
static int set_enabled(const char *val, const struct kernel_param *kp);
static const struct kernel_param_ops enabled_ops = {
	.set = set_enabled,
	.get = param_get_bool,
};
static bool enabled = true;
module_param_cb(enabled, &enabled_ops, &enabled, 0644);
MODULE_PARM_DESC(enabled, "Enable/disable parking (default: 1)");

/* Polling period of the governor work item. */
static unsigned int sample_interval_ms = 1000;
module_param(sample_interval_ms, uint, 0644);
MODULE_PARM_DESC(sample_interval_ms, "Polling interval in ms (default: 1000)");

/* Hysteresis thresholds: unpark above busy_up_pct, park below busy_down_pct. */
static unsigned int busy_up_pct = 75;
module_param(busy_up_pct, uint, 0644);
MODULE_PARM_DESC(busy_up_pct, "Avg util%% above which to unpark a core (default: 75)");
static unsigned int busy_down_pct = 30;
module_param(busy_down_pct, uint, 0644);
MODULE_PARM_DESC(busy_down_pct, "Avg util%% below which to park a core (default: 30)");

/*
 * Floor on how many parkable cores stay online (compared against the count
 * of online parkable CPUs in parking_work_fn).
 */
static unsigned int min_online;
module_param(min_online, uint, 0644);
MODULE_PARM_DESC(min_online, "Minimum P-cores to keep online (default: 0)");

/* Bitmask of CPUs the governor is allowed to offline. */
static unsigned int parkable_cpus = 0xFF0;
module_param(parkable_cpus, uint, 0644);
MODULE_PARM_DESC(parkable_cpus, "Bitmask of parkable CPUs (default: 0xFF0 = CPUs 4-11)");

/* Read-only status exports. */
static unsigned int parked_mask;
module_param_named(parked_cpus, parked_mask, uint, 0444);
MODULE_PARM_DESC(parked_cpus, "Current parked CPU bitmask (read-only)");
static unsigned int last_avg_util;
module_param(last_avg_util, uint, 0444);
MODULE_PARM_DESC(last_avg_util, "Last average utilization %% (read-only)");

/* Per-CPU idle/wall baselines (microseconds) from the previous sample tick. */
static u64 prev_idle[MAX_CPUS];
static u64 prev_wall[MAX_CPUS];
/* Self-rearming poller driving all park/unpark decisions. */
static struct delayed_work parking_work;
/* Re-baseline the idle/wall counters for @cpu; out-of-range CPUs are ignored. */
static void snapshot_cpu(unsigned int cpu)
{
	u64 wall_us;

	if (cpu >= MAX_CPUS)
		return;

	prev_idle[cpu] = get_cpu_idle_time_us(cpu, &wall_us);
	prev_wall[cpu] = wall_us;
}
/* Take a fresh idle/wall baseline on every currently-online CPU. */
static void snapshot_all_online(void)
{
	unsigned int i;

	for_each_online_cpu(i)
		snapshot_cpu(i);
}
/* Offline @cpu via CPU hotplug and record it in parked_mask on success. */
static void park_cpu(unsigned int cpu)
{
	int err;

	if (!cpu_online(cpu))
		return;

	err = remove_cpu(cpu);
	if (err) {
		pr_warn(DRIVER_NAME ": failed to park cpu%u\n", cpu);
		return;
	}

	parked_mask |= BIT(cpu);
	pr_info(DRIVER_NAME ": parked cpu%u (parked=0x%03x)\n", cpu, parked_mask);
}
/*
 * Bring @cpu back online, clear it from parked_mask, and re-baseline its
 * idle counters so the next sample does not see a huge stale delta.
 */
static void unpark_cpu(unsigned int cpu)
{
	int err;

	if (cpu_online(cpu))
		return;

	err = add_cpu(cpu);
	if (err) {
		pr_warn(DRIVER_NAME ": failed to unpark cpu%u\n", cpu);
		return;
	}

	parked_mask &= ~BIT(cpu);
	snapshot_cpu(cpu);
	pr_info(DRIVER_NAME ": unparked cpu%u (parked=0x%03x)\n", cpu, parked_mask);
}
/* Bring back every core we previously parked. */
static void unpark_all(void)
{
	int i;

	for_each_possible_cpu(i) {
		if (!(parked_mask & BIT(i)))
			continue;
		unpark_cpu(i);
	}
}
/*
 * Periodic governor: sample per-CPU utilization since the previous tick and
 * park or unpark at most one parkable core per interval based on the average.
 * Reschedules itself every sample_interval_ms while enabled.
 */
static void parking_work_fn(struct work_struct *work)
{
	unsigned int cpu, nr_sampled = 0, total_util = 0;
	unsigned int online_parkable = 0, parked_parkable;
	u64 idle, wall, d_idle, d_wall;
	int target;

	if (!enabled) {
		/* Disabled at runtime: restore all cores and stop rearming. */
		unpark_all();
		return;
	}

	for_each_online_cpu(cpu) {
		if (cpu >= MAX_CPUS)
			continue;
		idle = get_cpu_idle_time_us(cpu, &wall);
		if (idle == (u64)-1)
			continue;
		d_idle = idle - prev_idle[cpu];
		d_wall = wall - prev_wall[cpu];
		prev_idle[cpu] = idle;
		prev_wall[cpu] = wall;
		/*
		 * Count a CPU as sampled only when its delta is usable.
		 * Previously nr_sampled was incremented unconditionally, so a
		 * CPU with d_wall == 0 (freshly snapshotted) or d_idle >
		 * d_wall contributed a silent 0% to the sum, dragging the
		 * average down and biasing the governor toward parking.
		 */
		if (d_wall > 0 && d_idle <= d_wall) {
			total_util += (unsigned int)((d_wall - d_idle) * 100 / d_wall);
			nr_sampled++;
		}
		if (parkable_cpus & BIT(cpu))
			online_parkable++;
	}

	if (nr_sampled == 0)
		goto resched;

	last_avg_util = total_util / nr_sampled;
	parked_parkable = hweight32(parked_mask & parkable_cpus);

	if (last_avg_util > busy_up_pct && parked_parkable > 0) {
		/* Busy: bring back the lowest-numbered parked parkable core. */
		for (cpu = 0; cpu < MAX_CPUS; cpu++) {
			if ((parked_mask & BIT(cpu)) &&
			    (parkable_cpus & BIT(cpu))) {
				unpark_cpu(cpu);
				break;
			}
		}
	} else if (last_avg_util < busy_down_pct &&
		   online_parkable > min_online) {
		/* Idle: park the highest-numbered online parkable core. */
		for (target = MAX_CPUS - 1; target >= 0; target--) {
			if ((parkable_cpus & BIT(target)) &&
			    cpu_online(target) &&
			    !(parked_mask & BIT(target))) {
				park_cpu(target);
				break;
			}
		}
	}

resched:
	schedule_delayed_work(&parking_work,
			      msecs_to_jiffies(sample_interval_ms));
}
static int set_enabled(const char *val, const struct kernel_param *kp)
{
bool was_enabled = enabled;
int ret;
ret = param_set_bool(val, kp);
if (ret)
return ret;
if (!was_enabled && enabled) {
snapshot_all_online();
schedule_delayed_work(&parking_work,
msecs_to_jiffies(sample_interval_ms));
pr_info(DRIVER_NAME ": re-enabled\n");
}
return 0;
}
/* Module entry: initialize the work item, baseline idle counters, start polling. */
static int __init cpu_parking_init(void)
{
	INIT_DELAYED_WORK(&parking_work, parking_work_fn);
	snapshot_all_online();
	schedule_delayed_work(&parking_work,
			      msecs_to_jiffies(sample_interval_ms));

	pr_info(DRIVER_NAME ": loaded (parkable=0x%03x interval=%ums up=%u%% down=%u%%)\n",
		parkable_cpus, sample_interval_ms, busy_up_pct, busy_down_pct);

	return 0;
}
/*
 * Module exit: stop the self-rearming poller (cancel_delayed_work_sync also
 * handles a work item that requeues itself), then bring every parked core
 * back online so unloading never leaves CPUs offline.
 */
static void __exit cpu_parking_exit(void)
{
	cancel_delayed_work_sync(&parking_work);
	unpark_all();
	pr_info(DRIVER_NAME ": unloaded, all cores online\n");
}
module_init(cpu_parking_init);
module_exit(cpu_parking_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CPU core parking for Snapdragon X Elite (x1e80100)");
+13
View File
@@ -0,0 +1,13 @@
# Out-of-tree kbuild wrapper for the ec-reboot module.
ifneq ($(KERNELRELEASE),)
obj-m += ec-reboot.o
ec-reboot-objs := ec_reboot.o
else
KERNEL_DIR ?= /lib/modules/$(shell uname -r)/build
PWD := $(shell pwd)

# Declare phony targets so stray files named "all"/"clean" cannot shadow them.
.PHONY: all clean

all:
	$(MAKE) -C $(KERNEL_DIR) M=$(PWD) modules

clean:
	$(MAKE) -C $(KERNEL_DIR) M=$(PWD) clean
endif
+73
View File
@@ -0,0 +1,73 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/kobject.h>
#include <linux/surface_aggregator/controller.h>
/*
 * Sysfs "reboot" store handler: writing a string starting with '1' sends an
 * EC hard-reset request over the Surface System Aggregator (SSAM) bus
 * (target category 0x01, target 0x01, command 0x14, no payload).
 *
 * NOTE(review): it is not visible from this file whether ssam_get_controller()
 * takes a reference that must be dropped after the request completes —
 * confirm against the surface_aggregator controller API before relying on
 * this in a long-lived path.
 */
static ssize_t reboot_store(struct kobject *kobj, struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	struct ssam_controller *ctrl;
	struct ssam_request rqst = {};
	int ret;

	/* Only an explicit '1' triggers the reset; everything else is rejected. */
	if (count < 1 || buf[0] != '1')
		return -EINVAL;

	ctrl = ssam_get_controller();
	if (!ctrl)
		return -ENODEV;

	rqst.target_category = 0x01;
	rqst.target_id = 0x01;
	rqst.command_id = 0x14;
	rqst.instance_id = 0x00;
	rqst.flags = 0;
	rqst.length = 0;
	rqst.payload = NULL;

	/* Synchronous request with no response buffer expected. */
	ret = ssam_request_do_sync(ctrl, &rqst, NULL);
	if (ret)
		return ret;

	return count;
}
/* Write-only "reboot" attribute exposed under /sys/kernel/ec_reboot/. */
static struct kobj_attribute reboot_attr = __ATTR_WO(reboot);

static struct attribute *ec_reboot_attrs[] = {
	&reboot_attr.attr,
	NULL,
};

static const struct attribute_group ec_reboot_group = {
	.attrs = ec_reboot_attrs,
};

/* Parent kobject for the attribute group, created at init. */
static struct kobject *ec_reboot_kobj;
/* Create /sys/kernel/ec_reboot/ and attach the reboot attribute group. */
static int __init ec_reboot_init(void)
{
	int err;

	ec_reboot_kobj = kobject_create_and_add("ec_reboot", kernel_kobj);
	if (!ec_reboot_kobj)
		return -ENOMEM;

	err = sysfs_create_group(ec_reboot_kobj, &ec_reboot_group);
	if (err)
		kobject_put(ec_reboot_kobj);

	return err;
}
/* Tear down the sysfs interface, mirroring ec_reboot_init. */
static void __exit ec_reboot_exit(void)
{
	/*
	 * Remove the attribute group explicitly rather than relying on the
	 * kobject release path to tear it down implicitly; this keeps exit
	 * symmetric with init and is the conventional ordering.
	 */
	sysfs_remove_group(ec_reboot_kobj, &ec_reboot_group);
	kobject_put(ec_reboot_kobj);
}
module_init(ec_reboot_init);
module_exit(ec_reboot_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("EC hard reset via SSAM");
@@ -0,0 +1,99 @@
--- a/drivers/acpi/platform_profile.c
+++ b/drivers/acpi/platform_profile.c
@@ -176,7 +176,8 @@ static ssize_t profile_store(struct device *dev,
return ret;
}
- sysfs_notify(acpi_kobj, NULL, "platform_profile");
+ if (acpi_kobj)
+ sysfs_notify(acpi_kobj, NULL, "platform_profile");
return count;
}
@@ -341,7 +342,8 @@ static ssize_t platform_profile_store(struct kobject *kobj,
return ret;
}
- sysfs_notify(acpi_kobj, NULL, "platform_profile");
+ if (acpi_kobj)
+ sysfs_notify(acpi_kobj, NULL, "platform_profile");
return count;
}
@@ -377,7 +379,8 @@ void platform_profile_notify(struct device *dev)
scoped_cond_guard(mutex_intr, return, &profile_lock) {
_notify_class_profile(dev, NULL);
}
- sysfs_notify(acpi_kobj, NULL, "platform_profile");
+ if (acpi_kobj)
+ sysfs_notify(acpi_kobj, NULL, "platform_profile");
}
EXPORT_SYMBOL_GPL(platform_profile_notify);
@@ -425,7 +428,8 @@ int platform_profile_cycle(void)
return err;
}
- sysfs_notify(acpi_kobj, NULL, "platform_profile");
+ if (acpi_kobj)
+ sysfs_notify(acpi_kobj, NULL, "platform_profile");
return 0;
}
@@ -487,9 +491,11 @@ struct device *platform_profile_register(struct device *dev, const char *name,
goto cleanup_ida;
}
- sysfs_notify(acpi_kobj, NULL, "platform_profile");
+ if (acpi_kobj)
+ sysfs_notify(acpi_kobj, NULL, "platform_profile");
- err = sysfs_update_group(acpi_kobj, &platform_profile_group);
+ err = acpi_kobj ? sysfs_update_group(acpi_kobj, &platform_profile_group)
+ : 0;
if (err)
goto cleanup_cur;
@@ -519,8 +525,10 @@ void platform_profile_remove(struct device *dev)
ida_free(&platform_profile_ida, pprof->minor);
device_unregister(&pprof->dev);
- sysfs_notify(acpi_kobj, NULL, "platform_profile");
- sysfs_update_group(acpi_kobj, &platform_profile_group);
+ if (acpi_kobj) {
+ sysfs_notify(acpi_kobj, NULL, "platform_profile");
+ sysfs_update_group(acpi_kobj, &platform_profile_group);
+ }
}
EXPORT_SYMBOL_GPL(platform_profile_remove);
@@ -567,14 +575,16 @@ static int __init platform_profile_init(void)
{
int err;
- if (acpi_disabled)
- return -EOPNOTSUPP;
-
err = class_register(&platform_profile_class);
if (err)
return err;
- err = sysfs_create_group(acpi_kobj, &platform_profile_group);
+ /*
+ * Legacy sysfs interface under /sys/firmware/acpi/ is only available
+ * when ACPI is enabled. The class-based interface works regardless.
+ */
+ err = acpi_kobj ? sysfs_create_group(acpi_kobj, &platform_profile_group)
+ : 0;
if (err)
class_unregister(&platform_profile_class);
@@ -584,7 +594,8 @@ static int __init platform_profile_init(void)
static void __exit platform_profile_exit(void)
{
- sysfs_remove_group(acpi_kobj, &platform_profile_group);
+ if (acpi_kobj)
+ sysfs_remove_group(acpi_kobj, &platform_profile_group);
class_unregister(&platform_profile_class);
}
module_init(platform_profile_init);
+197
View File
@@ -0,0 +1,197 @@
diff -ruN a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -48,6 +48,12 @@
#define TRE_SPI_GO_CS GENMASK(10, 8)
#define TRE_SPI_GO_FRAG BIT(26)
+/* QSPI GO WD0 - flags field is 12 bits at [31:20] instead of SPI's 8 bits */
+#define TRE_QSPI_GO_FLAGS GENMASK(31, 20)
+
+/* QSPI Config0 WD0 - dummy clock count */
+#define TRE_SPI_C0_DUMMY_CLK GENMASK(21, 14)
+
/* GO WD2 */
#define TRE_RX_LEN GENMASK(23, 0)
@@ -1275,8 +1281,17 @@
upper_32_bits(ring->phys_addr));
gpi_write_reg(gpii, chan->ch_cntxt_db_reg + CNTXT_5_RING_RP_MSB - CNTXT_4_RING_RP_LSB,
upper_32_bits(ring->phys_addr));
- gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_SCRATCH_0_OFFS(id, chid),
- GPII_n_CH_k_SCRATCH_0(pair_chid, chan->protocol, chan->seid));
+ /*
+ * For QSPI, use the SE's native protocol value (9) in SCRATCH_0.
+ * The DT binding uses QCOM_GPI_QSPI=4 for channel selection, but
+ * the GSI firmware expects the actual SE protocol ID.
+ */
+ {
+ u32 hw_proto = chan->protocol == QCOM_GPI_QSPI ?
+ 9 : chan->protocol;
+ gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_SCRATCH_0_OFFS(id, chid),
+ GPII_n_CH_k_SCRATCH_0(pair_chid, hw_proto, chan->seid));
+ }
gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_SCRATCH_1_OFFS(id, chid), 0);
gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_SCRATCH_2_OFFS(id, chid), 0);
gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_SCRATCH_3_OFFS(id, chid), 0);
@@ -1699,6 +1714,116 @@
return tre_idx;
}
+static void qspi_add_config_tre(struct gpi_spi_config *spi,
+ struct gpi_tre *tre)
+{
+ tre->dword[0] = u32_encode_bits(spi->word_len, TRE_SPI_C0_WORD_SZ);
+ tre->dword[0] |= u32_encode_bits(spi->loopback_en, TRE_SPI_C0_LOOPBACK);
+ tre->dword[0] |= u32_encode_bits(spi->clock_pol_high, TRE_SPI_C0_CPOL);
+ tre->dword[0] |= u32_encode_bits(spi->data_pol_high, TRE_SPI_C0_CPHA);
+ tre->dword[0] |= u32_encode_bits(spi->pack_en, TRE_SPI_C0_TX_PACK);
+ tre->dword[0] |= u32_encode_bits(spi->pack_en, TRE_SPI_C0_RX_PACK);
+ tre->dword[0] |= u32_encode_bits(spi->dummy_clk_cnt, TRE_SPI_C0_DUMMY_CLK);
+
+ tre->dword[1] = 0;
+
+ tre->dword[2] = u32_encode_bits(spi->clk_div, TRE_C0_CLK_DIV);
+ tre->dword[2] |= u32_encode_bits(spi->clk_src, TRE_C0_CLK_SRC);
+
+ tre->dword[3] = u32_encode_bits(TRE_TYPE_CONFIG0, TRE_FLAGS_TYPE);
+ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
+}
+
+static int gpi_create_qspi_tre(struct gchan *chan, struct gpi_desc *desc,
+ struct scatterlist *sgl,
+ enum dma_transfer_direction direction)
+{
+ struct gpi_spi_config *spi = chan->config;
+ struct device *dev = chan->gpii->gpi_dev->dev;
+ unsigned int tre_idx = 0;
+ struct gpi_tre *tre;
+ u32 qspi_flags;
+ dma_addr_t address;
+ int len;
+
+ /* Config TRE (TX channel only) */
+ if (direction == DMA_MEM_TO_DEV && spi->set_config) {
+ qspi_add_config_tre(spi, &desc->tre[tre_idx]);
+ tre_idx++;
+ }
+
+ /* Go TRE (TX channel only) */
+ if (direction == DMA_MEM_TO_DEV) {
+ tre = &desc->tre[tre_idx];
+ tre_idx++;
+
+ qspi_flags = spi->qspi_lane_flags;
+ if (spi->fragmentation)
+ qspi_flags |= BIT(6);
+
+ tre->dword[0] = u32_encode_bits(qspi_flags, TRE_QSPI_GO_FLAGS);
+ tre->dword[0] |= u32_encode_bits(spi->cs, TRE_SPI_GO_CS);
+ tre->dword[0] |= u32_encode_bits(spi->cmd, TRE_SPI_GO_CMD);
+
+ tre->dword[1] = 0;
+ tre->dword[2] = u32_encode_bits(spi->rx_len, TRE_RX_LEN);
+
+ tre->dword[3] = u32_encode_bits(TRE_TYPE_GO, TRE_FLAGS_TYPE);
+ if (spi->cmd == SPI_RX) {
+ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOB);
+ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_LINK);
+ } else if (spi->cmd == SPI_TX) {
+ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
+ } else { /* SPI_TX_RX or SPI_DUPLEX */
+ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
+ if (spi->rx_len > 0)
+ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_LINK);
+ }
+ }
+
+ /* DMA TRE - skip for SPI_RX on TX channel */
+ if (direction == DMA_MEM_TO_DEV && spi->cmd == SPI_RX)
+ goto skip_dma_tre;
+
+ tre = &desc->tre[tre_idx];
+ tre_idx++;
+
+ address = sg_dma_address(sgl);
+ len = sg_dma_len(sgl);
+
+ /*
+ * For QSPI TX_RX, limit TX DMA to command bytes only.
+ * The SE parses the TX data as opcode+address and uses rx_len
+ * from the Go TRE for the receive phase. Sending more TX bytes
+ * than the command length confuses the SE's QSPI state machine.
+ */
+ if (direction == DMA_MEM_TO_DEV && spi->cmd == SPI_TX_RX &&
+ spi->tx_cmd_len > 0 && spi->tx_cmd_len < len)
+ len = spi->tx_cmd_len;
+
+ if (direction == DMA_MEM_TO_DEV && len <= 2 * sizeof(tre->dword[0])) {
+ tre->dword[0] = 0;
+ tre->dword[1] = 0;
+ memcpy(&tre->dword[0], sg_virt(sgl), len);
+ tre->dword[2] = u32_encode_bits(len, TRE_DMA_IMMEDIATE_LEN);
+ tre->dword[3] = u32_encode_bits(TRE_TYPE_IMMEDIATE_DMA, TRE_FLAGS_TYPE);
+ } else {
+ tre->dword[0] = lower_32_bits(address);
+ tre->dword[1] = upper_32_bits(address);
+ tre->dword[2] = u32_encode_bits(len, TRE_DMA_LEN);
+ tre->dword[3] = u32_encode_bits(TRE_TYPE_DMA, TRE_FLAGS_TYPE);
+ }
+ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOT);
+
+skip_dma_tre:
+ for (len = 0; len < tre_idx; len++)
+ dev_dbg(dev, "QSPI TRE:%d %x:%x:%x:%x\n", len,
+ desc->tre[len].dword[0], desc->tre[len].dword[1],
+ desc->tre[len].dword[2], desc->tre[len].dword[3]);
+
+ return tre_idx;
+}
+
static int gpi_create_spi_tre(struct gchan *chan, struct gpi_desc *desc,
struct scatterlist *sgl, enum dma_transfer_direction direction)
{
@@ -1841,7 +1966,9 @@
return NULL;
/* create TREs for xfer */
- if (gchan->protocol == QCOM_GPI_SPI) {
+ if (gchan->protocol == QCOM_GPI_QSPI) {
+ i = gpi_create_qspi_tre(gchan, gpi_desc, sgl, direction);
+ } else if (gchan->protocol == QCOM_GPI_SPI) {
i = gpi_create_spi_tre(gchan, gpi_desc, sgl, direction);
} else if (gchan->protocol == QCOM_GPI_I2C) {
i = gpi_create_i2c_tre(gchan, gpi_desc, sgl, direction, flags);
diff -ruN a/include/dt-bindings/dma/qcom-gpi.h b/include/dt-bindings/dma/qcom-gpi.h
--- a/include/dt-bindings/dma/qcom-gpi.h
+++ b/include/dt-bindings/dma/qcom-gpi.h
@@ -7,5 +7,6 @@
#define QCOM_GPI_SPI 1
#define QCOM_GPI_UART 2
#define QCOM_GPI_I2C 3
+#define QCOM_GPI_QSPI 4
#endif /* __DT_BINDINGS_DMA_QCOM_GPI_H__ */
diff -ruN a/include/linux/dma/qcom-gpi-dma.h b/include/linux/dma/qcom-gpi-dma.h
--- a/include/linux/dma/qcom-gpi-dma.h
+++ b/include/linux/dma/qcom-gpi-dma.h
@@ -13,6 +13,7 @@
SPI_TX = 1,
SPI_RX,
SPI_DUPLEX,
+ SPI_TX_RX = 7, /* QSPI full-duplex (TX opcode+addr, RX data) */
};
/**
@@ -44,6 +45,11 @@
u32 clk_src;
enum spi_transfer_cmd cmd;
u32 rx_len;
+ /* QSPI extensions */
+ bool qspi_mode;
+ u16 qspi_lane_flags;
+ u8 dummy_clk_cnt;
+ u16 tx_cmd_len;
};
enum i2c_op {
@@ -0,0 +1,354 @@
diff -ruN a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -2,6 +2,7 @@
// Copyright (c) 2017-2018, The Linux foundation. All rights reserved.
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/qcom-gpi-dma.h>
@@ -75,9 +76,68 @@
#define GSI_CPHA BIT(4)
#define GSI_CPOL BIT(5)
+/* QSPI 1-4-4 support (added on top of the standard SPI controller). */
+#define QSPI_SE_PROTO 9
+
+#define GENI_IO_MUX_1_EN BIT(1)
+#define GENI_IO_MUX_2_EN BIT(2)
+#define GENI_IO_MUX_3_EN BIT(3)
+#define GENI_QSPI_IO_MUX_EN (GENI_IO_MUX_0_EN | GENI_IO_MUX_1_EN | \
+ GENI_IO_MUX_2_EN | GENI_IO_MUX_3_EN)
+
+#define SE_GSI_EVENT_EN 0xe18
+#define SE_IRQ_EN 0xe1c
+#define SE_DMA_TX_IRQ_CLR 0xc44
+#define SE_DMA_TX_IRQ_EN_SET 0xc4c
+#define SE_DMA_TX_IRQ_EN_CLR 0xc50
+#define SE_DMA_RX_IRQ_CLR 0xd44
+#define SE_DMA_RX_IRQ_EN_SET 0xd4c
+#define SE_DMA_RX_IRQ_EN_CLR 0xd50
+
+#define DMA_RX_EVENT_EN BIT(0)
+#define DMA_TX_EVENT_EN BIT(1)
+#define GENI_M_EVENT_EN BIT(2)
+#define GENI_S_EVENT_EN BIT(3)
+
+#define QSPI_M_IRQ_EN_GPI 0x33c00046
+#define QSPI_S_IRQ_EN_GPI 0x03001e06
+#define QSPI_DMA_TX_IRQ_EN 0x0d
+#define QSPI_DMA_RX_IRQ_EN 0x1d
+
+#define QSPI_SINGLE_SDR 0x000
+#define QSPI_QUAD_SDR BIT(9)
+
+/* Defaults for a quad read with 1-byte opcode + 3-byte address. */
+#define QSPI_DEFAULT_READ_OPCODE 0xEB
+#define QSPI_DEFAULT_DUMMY_CLK_CNT 8
+#define QSPI_DEFAULT_TX_CMD_LEN 4
+#define QSPI_DEFAULT_MAX_SPEED_HZ 20000000
+
+/**
+ * struct spi_geni_data - per-compatible behavioral flags
+ * @qspi_mode: controller runs in QSPI 1-4-4 mode with 4 data lanes
+ */
+struct spi_geni_data {
+ bool qspi_mode;
+};
+
+/**
+ * struct spi_geni_qspi_params - per-SE QSPI tunables read from DT
+ * @read_opcode: first TX byte that identifies a read transfer (e.g. 0xEB)
+ * @dummy_clk_cnt: dummy clocks inserted between address and read data
+ * @tx_cmd_len: number of TX bytes forming the read command (opcode+address)
+ */
+struct spi_geni_qspi_params {
+ u32 read_opcode;
+ u32 dummy_clk_cnt;
+ u32 tx_cmd_len;
+};
+
struct spi_geni_master {
struct geni_se se;
struct device *dev;
+ const struct spi_geni_data *data;
+ struct spi_geni_qspi_params qspi;
u32 tx_fifo_depth;
u32 fifo_width_bits;
u32 tx_wm;
@@ -104,6 +164,52 @@
int cur_xfer_mode;
};
+static inline bool spi_geni_is_qspi(const struct spi_geni_master *mas)
+{
+ return mas->data && mas->data->qspi_mode;
+}
+
+/* Enable all 4 data lanes on the GENI output mux for QSPI. */
+static void qspi_setup_io_mux(struct spi_geni_master *mas)
+{
+ struct geni_se *se = &mas->se;
+ u32 out;
+
+ out = readl(se->base + GENI_OUTPUT_CTRL);
+ out |= GENI_QSPI_IO_MUX_EN;
+ writel(out, se->base + GENI_OUTPUT_CTRL);
+}
+
+/*
+ * Reprogram the SE IRQ / DMA / event registers for GPI DMA.
+ *
+ * geni_se_resources_on() (called from runtime_resume) writes a fixed set of
+ * values into SE_IRQ_EN that are correct for FIFO/SE_DMA mode but clobber the
+ * GPI configuration. The QSPI SE_PROTO (9) also needs different masks than
+ * the defaults. Call this from prepare_message (post resume, pre transfer)
+ * to restore the GPI-friendly values.
+ */
+static void prep_se_for_gpi_dma(struct spi_geni_master *mas)
+{
+ void __iomem *base = mas->se.base;
+
+ writel(GENI_DMA_MODE_EN, base + SE_GENI_DMA_MODE_EN);
+ writel(0, base + SE_IRQ_EN);
+ writel(DMA_RX_EVENT_EN | DMA_TX_EVENT_EN |
+ GENI_M_EVENT_EN | GENI_S_EVENT_EN,
+ base + SE_GSI_EVENT_EN);
+ writel(QSPI_M_IRQ_EN_GPI, base + SE_GENI_M_IRQ_EN);
+ writel(QSPI_S_IRQ_EN_GPI, base + SE_GENI_S_IRQ_EN);
+ writel(0xf, base + SE_DMA_TX_IRQ_EN_CLR);
+ writel(QSPI_DMA_TX_IRQ_EN, base + SE_DMA_TX_IRQ_EN_SET);
+ writel(0xfff, base + SE_DMA_RX_IRQ_EN_CLR);
+ writel(QSPI_DMA_RX_IRQ_EN, base + SE_DMA_RX_IRQ_EN_SET);
+ writel(0xffc07fff, base + SE_GENI_M_IRQ_CLEAR);
+ writel(0x0fc07f3f, base + SE_GENI_S_IRQ_CLEAR);
+ writel(0xf, base + SE_DMA_TX_IRQ_CLR);
+ writel(0xfff, base + SE_DMA_RX_IRQ_CLR);
+}
+
static void spi_slv_setup(struct spi_geni_master *mas)
{
struct geni_se *se = &mas->se;
@@ -411,7 +517,20 @@
}
if (xfer->tx_buf && xfer->rx_buf) {
- peripheral.cmd = SPI_DUPLEX;
+ /*
+ * QSPI uses SPI_TX_RX (7) for TX-opcode-then-RX-data transfers;
+ * standard SPI uses SPI_DUPLEX (3) for true full-duplex.
+ *
+ * For QSPI_TX_RX the GSI firmware needs an explicit rx_len so
+ * it knows how many bytes to clock in after the TX command
+ * phase. SPI_DUPLEX uses xfer->len implicitly.
+ */
+ if (spi_geni_is_qspi(mas)) {
+ peripheral.cmd = SPI_TX_RX;
+ peripheral.rx_len = (xfer->len << 3) / xfer->bits_per_word;
+ } else {
+ peripheral.cmd = SPI_DUPLEX;
+ }
} else if (xfer->tx_buf) {
peripheral.cmd = SPI_TX;
peripheral.rx_len = 0;
@@ -445,6 +564,44 @@
peripheral.fragmentation = FRAGMENTATION;
}
+ if (spi_geni_is_qspi(mas)) {
+ bool multi = !list_is_singular(&spi->cur_msg->transfers);
+
+ peripheral.qspi_mode = true;
+
+ /*
+ * In a multi-transfer message the first write uses SINGLE_SDR
+ * (opcode+addr lane transition) while subsequent TX-only
+ * transfers stay in QUAD.
+ */
+ if (multi && xfer->tx_buf && !xfer->rx_buf &&
+ &xfer->transfer_list == spi->cur_msg->transfers.next)
+ peripheral.qspi_lane_flags = QSPI_SINGLE_SDR;
+ else
+ peripheral.qspi_lane_flags = QSPI_QUAD_SDR;
+
+ if (peripheral.cmd == SPI_TX_RX && xfer->tx_buf) {
+ const u8 *tx = xfer->tx_buf;
+
+ /*
+ * The configured read opcode is followed by an address
+ * then dummy clocks before the device drives the data
+ * lanes. Any other first byte means the host is issuing
+ * a write command, so downgrade to TX-only.
+ */
+ if (tx[0] == mas->qspi.read_opcode) {
+ peripheral.dummy_clk_cnt = mas->qspi.dummy_clk_cnt;
+ peripheral.tx_cmd_len = mas->qspi.tx_cmd_len;
+ } else {
+ peripheral.cmd = SPI_TX;
+ peripheral.dummy_clk_cnt = 0;
+ peripheral.rx_len = 0;
+ }
+ } else if (peripheral.cmd == SPI_RX) {
+ peripheral.dummy_clk_cnt = mas->qspi.dummy_clk_cnt;
+ }
+ }
+
if (peripheral.cmd & SPI_RX) {
dmaengine_slave_config(mas->rx, &config);
rx_desc = dmaengine_prep_slave_sg(mas->rx, xfer->rx_sg.sgl, xfer->rx_sg.nents,
@@ -467,8 +624,18 @@
return -EIO;
}
- tx_desc->callback_result = spi_gsi_callback_result;
- tx_desc->callback_param = spi;
+ /*
+ * In QSPI mode the Go TRE on the TX channel has no DMA TRE for SPI_RX
+ * (and for TX_RX completes TX-side early), so the real completion
+ * event is the IEOT on the RX channel. Attach the callback there.
+ */
+ if (spi_geni_is_qspi(mas) && (peripheral.cmd & SPI_RX)) {
+ rx_desc->callback_result = spi_gsi_callback_result;
+ rx_desc->callback_param = spi;
+ } else {
+ tx_desc->callback_result = spi_gsi_callback_result;
+ tx_desc->callback_param = spi;
+ }
if (peripheral.cmd & SPI_RX)
dmaengine_submit(rx_desc);
@@ -534,7 +701,13 @@
return ret;
case GENI_GPI_DMA:
- /* nothing to do for GPI DMA */
+ /*
+ * In QSPI mode, runtime_resume's call to geni_se_resources_on()
+ * clobbers the GPI-specific IRQ/DMA register layout. Restore
+ * it before every message.
+ */
+ if (spi_geni_is_qspi(mas))
+ prep_se_for_gpi_dma(mas);
return 0;
}
@@ -609,6 +782,18 @@
goto out_pm;
}
spi_slv_setup(mas);
+ } else if (spi_geni_is_qspi(mas)) {
+ /*
+ * QSPI SEs report protocol 9 in hardware. The GENI_SE_SPI
+ * firmware loader cannot be used here (no firmware for proto 9
+ * is shipped), so reject anything else.
+ */
+ if (proto != QSPI_SE_PROTO) {
+ dev_err(mas->dev, "Expected QSPI proto %d, got %d\n",
+ QSPI_SE_PROTO, proto);
+ goto out_pm;
+ }
+ qspi_setup_io_mux(mas);
} else if (proto == GENI_SE_INVALID_PROTO) {
ret = geni_load_se_firmware(se, GENI_SE_SPI);
if (ret) {
@@ -640,9 +825,28 @@
else
mas->oversampling = 1;
+ /*
+ * QSPI SEs cannot use FIFO mode (the FIFO path would try to drive
+ * a single-lane word format on a 4-lane QSPI bus), so force GPI DMA
+ * regardless of what GENI_IF_DISABLE_RO reports.
+ */
fifo_disable = readl(se->base + GENI_IF_DISABLE_RO) & FIFO_IF_DISABLE;
+ if (spi_geni_is_qspi(mas))
+ fifo_disable = 1;
+
switch (fifo_disable) {
case 1:
+ /*
+ * For QSPI, bring the SE up in SE_DMA first (which arms the
+ * DMA-related registers) and let it settle before switching
+ * to GPI_DMA and grabbing the GPII channels. Without this
+ * intermediate step the first GPI command after probe can
+ * hang on the CH STOP completion.
+ */
+ if (spi_geni_is_qspi(mas)) {
+ geni_se_select_mode(se, GENI_SE_DMA);
+ msleep(10);
+ }
ret = spi_geni_grab_gpi_chan(mas);
if (!ret) { /* success case */
mas->cur_xfer_mode = GENI_GPI_DMA;
@@ -653,6 +857,16 @@
goto out_pm;
}
/*
+ * For QSPI there is no usable FIFO fallback: FIFO mode cannot
+ * drive a 4-lane QSPI bus. Fail the probe instead of silently
+ * producing garbage.
+ */
+ if (spi_geni_is_qspi(mas)) {
+ dev_err(mas->dev, "Failed to grab GPI DMA channels for QSPI: %d\n",
+ ret);
+ goto out_pm;
+ }
+ /*
* in case of failure to get gpi dma channel, we can still do the
* FIFO mode, so fallthrough
*/
@@ -1052,11 +1266,24 @@
mas = spi_controller_get_devdata(spi);
mas->irq = irq;
mas->dev = dev;
+ mas->data = device_get_match_data(dev);
mas->se.dev = dev;
mas->se.wrapper = dev_get_drvdata(dev->parent);
mas->se.base = base;
mas->se.clk = clk;
+ if (spi_geni_is_qspi(mas)) {
+ mas->qspi.read_opcode = QSPI_DEFAULT_READ_OPCODE;
+ mas->qspi.dummy_clk_cnt = QSPI_DEFAULT_DUMMY_CLK_CNT;
+ mas->qspi.tx_cmd_len = QSPI_DEFAULT_TX_CMD_LEN;
+ device_property_read_u32(dev, "qcom,qspi-read-opcode",
+ &mas->qspi.read_opcode);
+ device_property_read_u32(dev, "qcom,qspi-read-dummy-clocks",
+ &mas->qspi.dummy_clk_cnt);
+ device_property_read_u32(dev, "qcom,qspi-read-cmd-bytes",
+ &mas->qspi.tx_cmd_len);
+ }
+
ret = devm_pm_opp_set_clkname(&pdev->dev, "se");
if (ret)
return ret;
@@ -1069,9 +1296,12 @@
spi->bus_num = -1;
spi->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_CS_HIGH;
+ if (spi_geni_is_qspi(mas))
+ spi->mode_bits |= SPI_TX_QUAD | SPI_RX_QUAD;
spi->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
spi->num_chipselect = 4;
- spi->max_speed_hz = 50000000;
+ spi->max_speed_hz = spi_geni_is_qspi(mas) ? QSPI_DEFAULT_MAX_SPEED_HZ
+ : 50000000;
spi->max_dma_len = 0xffff0; /* 24 bits for tx/rx dma length */
spi->prepare_message = spi_geni_prepare_message;
spi->transfer_one = spi_geni_transfer_one;
@@ -1197,8 +1427,13 @@
SET_SYSTEM_SLEEP_PM_OPS(spi_geni_suspend, spi_geni_resume)
};
+static const struct spi_geni_data spi_geni_qspi_data = {
+ .qspi_mode = true,
+};
+
static const struct of_device_id spi_geni_dt_match[] = {
{ .compatible = "qcom,geni-spi" },
+ { .compatible = "qcom,geni-spi-qspi", .data = &spi_geni_qspi_data },
{}
};
MODULE_DEVICE_TABLE(of, spi_geni_dt_match);
+19
View File
@@ -0,0 +1,19 @@
#
# Copyright (c) 2020 Microsoft Corporation
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
config SPI_HID
tristate "HID over SPI transport layer"
default n
help
Say Y here if you use a keyboard, a touchpad, a touchscreen, or any
other HID-based device that is connected to your computer via SPI.
If unsure, say N.
This support is also available as a module. If so, the module
will be called spi-hid.
+16
View File
@@ -0,0 +1,16 @@
# Out-of-tree kbuild wrapper for the spi-hid module.
ifneq ($(KERNELRELEASE),)
obj-m += spi-hid.o
spi-hid-objs := spi-hid-core.o
else
KERNEL_DIR ?= /lib/modules/$(shell uname -r)/build
PWD := $(shell pwd)

# Declare phony targets so stray files with these names cannot shadow them.
.PHONY: all clean install

all:
	$(MAKE) -C $(KERNEL_DIR) M=$(PWD) modules

clean:
	$(MAKE) -C $(KERNEL_DIR) M=$(PWD) clean

install:
	$(MAKE) -C $(KERNEL_DIR) M=$(PWD) modules_install
endif
+11
View File
@@ -0,0 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Stub hid-ids.h for out-of-tree spi-hid build
* The actual hid-ids.h is not needed by spi-hid driver
*/
#ifndef HID_IDS_H_FILE
#define HID_IDS_H_FILE
/* Empty stub - spi-hid doesn't use any HID vendor/device IDs */
#endif
File diff suppressed because it is too large Load Diff
+148
View File
@@ -0,0 +1,148 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* HID over SPI (HIDSPI v3) transport driver for QSPI touchpads.
*
* Based on Microsoft's spi-hid v2 driver.
* Copyright (c) 2020 Microsoft Corporation
*/
#ifndef SPI_HID_CORE_H
#define SPI_HID_CORE_H
#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/spi/spi.h>
#include <linux/types.h>
#define SPI_HID_INPUT_HEADER_SYNC_BYTE 0x5A
#define SPI_HID_INPUT_HEADER_VERSION 0x03
#define SPI_HID_QSPI_READ_OPCODE 0xEB
#define SPI_HID_QSPI_WRITE_OPCODE 0xE2
#define SPI_HID_QSPI_CMD_LEN 4
#define SPI_HID_INPUT_HDR_ADDR 0x1000
#define SPI_HID_INPUT_BDY_ADDR 0x1004
#define SPI_HID_OUTPUT_ADDR 0x2000
#define SPI_HID_SUPPORTED_VERSION 0x0300
#define SPI_HID_BODY_HEADER_LEN 4
/* Input report types (device -> host) */
#define SPI_HID_REPORT_TYPE_DATA 0x01
#define SPI_HID_REPORT_TYPE_RESET_RESP 0x03
#define SPI_HID_REPORT_TYPE_COMMAND_RESP 0x04
#define SPI_HID_REPORT_TYPE_GET_FEATURE_RESP 0x05
#define SPI_HID_REPORT_TYPE_DEVICE_DESC 0x07
#define SPI_HID_REPORT_TYPE_REPORT_DESC 0x08
#define SPI_HID_REPORT_TYPE_SET_FEATURE_RESP 0x09
#define SPI_HID_REPORT_TYPE_OUTPUT_REPORT_RESP 0x0A
#define SPI_HID_REPORT_TYPE_GET_INPUT_RESP 0x0B
/* Output report types (host -> device) */
#define SPI_HID_OUT_DEVICE_DESC 0x01
#define SPI_HID_OUT_REPORT_DESC 0x02
#define SPI_HID_OUT_SET_FEATURE 0x03
#define SPI_HID_OUT_GET_FEATURE 0x04
#define SPI_HID_OUT_OUTPUT_REPORT 0x05
#define SPI_HID_OUT_GET_INPUT_REPORT 0x06
#define SPI_HID_OUT_COMMAND_CONTENT 0x07
#define SPI_HID_POWER_MODE_ACTIVE 0x01
#define SPI_HID_POWER_MODE_SLEEP 0x02
#define SPI_HID_POWER_MODE_OFF 0x03
#define SPI_HID_RESET_ASSERT_MS 300
#define SPI_HID_POST_DIR_DELAY_MS 25
#define SPI_HID_RESPONSE_TIMEOUT_MS 2000
#define SPI_HID_MAX_RESET_ATTEMPTS 3
#define SPI_HID_MAX_INIT_RETRIES 10
#define SPI_HID_INPUT_HEADER_LEN 4
#define SPI_HID_MAX_INPUT_LEN SZ_8K
/*
 * On-the-wire HIDSPI device descriptor, little-endian, as read from the
 * device.  Field names follow the spec's Hungarian naming.
 */
struct spi_hid_device_desc_raw {
	__le16 wDeviceDescLength;
	__le16 bcdVersion;
	__le16 wReportDescLength;
	__le16 wMaxInputLength;
	__le16 wMaxOutputLength;
	__le16 wMaxFragmentLength;
	__le16 wVendorID;
	__le16 wProductID;
	__le16 wVersionID;
	__le16 wFlags;
	__u8 reserved[4];
} __packed;
/* Parsed device descriptor (CPU-endian copy of spi_hid_device_desc_raw). */
struct spi_hid_device_descriptor {
	u16 hid_version;
	u16 report_descriptor_length;
	u16 max_input_length;
	u16 max_output_length;
	u16 max_fragment_length;
	u16 vendor_id;
	u16 product_id;
	u16 version_id;
	u16 flags;
};
/*
 * Parsed input-report header.  sync_const is validated against
 * SPI_HID_INPUT_HEADER_SYNC_BYTE and version against
 * SPI_HID_INPUT_HEADER_VERSION.
 */
struct spi_hid_input_header {
	u8 version;
	u16 body_len;
	u8 last_frag;	/* nonzero on the final fragment of a report */
	u8 sync_const;
};
/* Parsed body header preceding the report payload (SPI_HID_BODY_HEADER_LEN). */
struct spi_hid_body_header {
	u8 report_type;		/* one of SPI_HID_REPORT_TYPE_* */
	u16 content_length;
	u8 content_id;
};
/* Per-device driver state for one HID-over-SPI transport instance. */
struct spi_hid {
	struct spi_device *spi;
	struct hid_device *hid;
	struct spi_hid_device_descriptor desc;

	/* Latest synchronous response body and its metadata. */
	u8 *resp_buf;
	int resp_len;
	u8 resp_type;		/* report type of the last response */

	u8 power_state;		/* SPI_HID_POWER_MODE_* */
	u8 attempts;		/* reset/retry counter — TODO confirm unit */
	bool ready;
	bool irq_enabled;

	struct regulator *supply;
	struct pinctrl *pinctrl;
	struct pinctrl_state *pinctrl_reset;
	struct pinctrl_state *pinctrl_active;
	struct pinctrl_state *pinctrl_sleep;

	/* Deferred work for reset and (re)creating the HID device. */
	struct work_struct reset_work;
	struct work_struct create_device_work;
	struct work_struct refresh_device_work;

	struct mutex lock;
	struct completion output_done;

	/* Error accounting. */
	u32 bus_error_count;
	int bus_last_error;
	u32 dir_count;
	u32 powered;

	/* Report-descriptor read buffer. */
	u8 *rd_buf;
	int rd_len;

	/* Pre-allocated TX/RX buffers for the IRQ-driven header/body reads. */
	u8 *irq_hdr_tx;
	u8 *irq_hdr_rx;
	u8 *irq_bdy_tx;
	u8 *irq_bdy_rx;
};
#endif
+200
View File
@@ -0,0 +1,200 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* spi-hid_trace.h
*
* Copyright (c) 2020 Microsoft Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM spi_hid
#if !defined(_SPI_HID_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _SPI_HID_TRACE_H
#include <linux/types.h>
#include <linux/tracepoint.h>
#include "spi-hid-core.h"
/*
 * spi_hid_transfer - event class for raw SPI transfers.
 *
 * Records bus/chip-select, total transfer length, the return code, and the
 * full tx/rx payloads as dynamic arrays so they can be hex-dumped from the
 * trace buffer.
 *
 * NOTE(review): shid->spi->chip_select is accessed directly; on kernels
 * >= 6.3 this field is an array and spi_get_chipselect() should be used —
 * confirm the target kernel version.
 */
DECLARE_EVENT_CLASS(spi_hid_transfer,
	TP_PROTO(struct spi_hid *shid, const void *tx_buf, int tx_len,
		const void *rx_buf, u16 rx_len, int ret),
	TP_ARGS(shid, tx_buf, tx_len, rx_buf, rx_len, ret),
	TP_STRUCT__entry(
		__field(int, bus_num)
		__field(int, chip_select)
		__field(int, len)
		__field(int, ret)
		__dynamic_array(u8, rx_buf, rx_len)
		__dynamic_array(u8, tx_buf, tx_len)
	),
	TP_fast_assign(
		__entry->bus_num = shid->spi->controller->bus_num;
		__entry->chip_select = shid->spi->chip_select;
		__entry->len = rx_len + tx_len;
		__entry->ret = ret;
		/*
		 * Guard the copies: a one-directional transfer may pass a
		 * NULL buffer with length 0, and memcpy() from/to NULL is
		 * undefined behavior even for a zero length (C11 7.24.1p2).
		 */
		if (tx_len)
			memcpy(__get_dynamic_array(tx_buf), tx_buf, tx_len);
		if (rx_len)
			memcpy(__get_dynamic_array(rx_buf), rx_buf, rx_len);
	),
	TP_printk("spi%d.%d: len=%d tx=[%*phD] rx=[%*phD] --> %d",
		__entry->bus_num, __entry->chip_select, __entry->len,
		__get_dynamic_array_len(tx_buf), __get_dynamic_array(tx_buf),
		__get_dynamic_array_len(rx_buf), __get_dynamic_array(rx_buf),
		__entry->ret)
);
/* Async input transfer submitted from the IRQ path. */
DEFINE_EVENT(spi_hid_transfer, spi_hid_input_async,
	TP_PROTO(struct spi_hid *shid, const void *tx_buf, int tx_len,
		const void *rx_buf, u16 rx_len, int ret),
	TP_ARGS(shid, tx_buf, tx_len, rx_buf, rx_len, ret)
);
/* Input header fragment finished transferring. */
DEFINE_EVENT(spi_hid_transfer, spi_hid_input_header_complete,
	TP_PROTO(struct spi_hid *shid, const void *tx_buf, int tx_len,
		const void *rx_buf, u16 rx_len, int ret),
	TP_ARGS(shid, tx_buf, tx_len, rx_buf, rx_len, ret)
);
/* Input body fragment finished transferring. */
DEFINE_EVENT(spi_hid_transfer, spi_hid_input_body_complete,
	TP_PROTO(struct spi_hid *shid, const void *tx_buf, int tx_len,
		const void *rx_buf, u16 rx_len, int ret),
	TP_ARGS(shid, tx_buf, tx_len, rx_buf, rx_len, ret)
);
/* Host-to-device output transfer about to start. */
DEFINE_EVENT(spi_hid_transfer, spi_hid_output_begin,
	TP_PROTO(struct spi_hid *shid, const void *tx_buf, int tx_len,
		const void *rx_buf, u16 rx_len, int ret),
	TP_ARGS(shid, tx_buf, tx_len, rx_buf, rx_len, ret)
);
/* Host-to-device output transfer finished. */
DEFINE_EVENT(spi_hid_transfer, spi_hid_output_end,
	TP_PROTO(struct spi_hid *shid, const void *tx_buf, int tx_len,
		const void *rx_buf, u16 rx_len, int ret),
	TP_ARGS(shid, tx_buf, tx_len, rx_buf, rx_len, ret)
);
/*
 * spi_hid_irq - event class for interrupt activity.
 * Logs only the bus address and the IRQ number.
 */
DECLARE_EVENT_CLASS(spi_hid_irq,
	TP_PROTO(struct spi_hid *shid, int irq),
	TP_ARGS(shid, irq),
	TP_STRUCT__entry(
		__field(int, bus_num)
		__field(int, chip_select)
		__field(int, irq)
	),
	TP_fast_assign(
		__entry->bus_num = shid->spi->controller->bus_num;
		/* NOTE(review): direct chip_select access — array on kernels >= 6.3, confirm */
		__entry->chip_select = shid->spi->chip_select;
		__entry->irq = irq;
	),
	TP_printk("spi%d.%d: IRQ %d",
		__entry->bus_num, __entry->chip_select, __entry->irq)
);
/* Device interrupt line fired. */
DEFINE_EVENT(spi_hid_irq, spi_hid_dev_irq,
	TP_PROTO(struct spi_hid *shid, int irq),
	TP_ARGS(shid, irq)
);
/*
 * spi_hid - event class snapshotting overall driver state: identity from
 * the parsed device descriptor plus the current input/power state.
 *
 * NOTE(review): reads shid->input_stage and shid->input_transfer_pending,
 * which are not present in the struct spi_hid visible in this dump —
 * confirm spi-hid-core.h is from the same revision.
 */
DECLARE_EVENT_CLASS(spi_hid,
	TP_PROTO(struct spi_hid *shid),
	TP_ARGS(shid),
	TP_STRUCT__entry(
		__field(int, bus_num)
		__field(int, chip_select)
		__field(int, input_stage)
		__field(int, power_state)
		__field(u32, input_transfer_pending)
		__field(bool, ready)
		__field(int, vendor_id)
		__field(int, product_id)
		__field(int, max_input_length)
		__field(int, max_output_length)
		__field(u16, hid_version)
		__field(u16, report_descriptor_length)
		__field(u16, version_id)
	),
	TP_fast_assign(
		__entry->bus_num = shid->spi->controller->bus_num;
		/* NOTE(review): direct chip_select access — array on kernels >= 6.3, confirm */
		__entry->chip_select = shid->spi->chip_select;
		__entry->input_stage = shid->input_stage;
		__entry->power_state = shid->power_state;
		__entry->input_transfer_pending = shid->input_transfer_pending;
		__entry->ready = shid->ready;
		__entry->vendor_id = shid->desc.vendor_id;
		__entry->product_id = shid->desc.product_id;
		__entry->max_input_length = shid->desc.max_input_length;
		__entry->max_output_length = shid->desc.max_output_length;
		__entry->hid_version = shid->desc.hid_version;
		__entry->report_descriptor_length = shid->desc.report_descriptor_length;
		__entry->version_id = shid->desc.version_id;
	),
	/* 'R'/'r' encodes the ready flag; hid_version is printed as BCD major.minor */
	TP_printk("spi%d.%d: (%04x:%04x v%d) HID v%d.%d state i:%d p:%d len i:%d o:%d r:%d flags %c:%d",
		__entry->bus_num, __entry->chip_select, __entry->vendor_id,
		__entry->product_id, __entry->version_id,
		__entry->hid_version >> 8, __entry->hid_version & 0xff,
		__entry->input_stage, __entry->power_state,
		__entry->max_input_length, __entry->max_output_length,
		__entry->report_descriptor_length,
		__entry->ready ? 'R' : 'r',
		__entry->input_transfer_pending)
);
/* Input report arrived on the bus. */
DEFINE_EVENT(spi_hid, spi_hid_bus_input_report,
	TP_PROTO(struct spi_hid *shid),
	TP_ARGS(shid)
);
/* Input report being processed/parsed. */
DEFINE_EVENT(spi_hid, spi_hid_process_input_report,
	TP_PROTO(struct spi_hid *shid),
	TP_ARGS(shid)
);
/* Input report handed off to the HID core handler. */
DEFINE_EVENT(spi_hid, spi_hid_input_report_handler,
	TP_PROTO(struct spi_hid *shid),
	TP_ARGS(shid)
);
/* reset_work workqueue item running. */
DEFINE_EVENT(spi_hid, spi_hid_reset_work,
	TP_PROTO(struct spi_hid *shid),
	TP_ARGS(shid)
);
/* create_device_work workqueue item running. */
DEFINE_EVENT(spi_hid, spi_hid_create_device_work,
	TP_PROTO(struct spi_hid *shid),
	TP_ARGS(shid)
);
/* refresh_device_work workqueue item running. */
DEFINE_EVENT(spi_hid, spi_hid_refresh_device_work,
	TP_PROTO(struct spi_hid *shid),
	TP_ARGS(shid)
);
/* Response message dispatched to its handler. */
DEFINE_EVENT(spi_hid, spi_hid_response_handler,
	TP_PROTO(struct spi_hid *shid),
	TP_ARGS(shid)
);
#endif /* _SPI_HID_TRACE_H */

/*
 * Standard trace-header trailer: point define_trace.h at this file so the
 * tracepoint bodies are generated where CREATE_TRACE_POINTS is defined.
 */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE spi-hid_trace
#include <trace/define_trace.h>
+11
View File
@@ -0,0 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
/*
* trace.c - SPI HID Trace Support
*
* Copyright (C) 2020 Microsoft Corporation
*
* Author: Felipe Balbi <felipe.balbi@microsoft.com>
*/
/* Expand the tracepoint definitions from the header into this unit. */
#define CREATE_TRACE_POINTS
#include "spi-hid_trace.h"