This commit is contained in:
2026-04-10 15:45:53 +04:00
commit 95f3f072da
24 changed files with 3526 additions and 0 deletions
+197
View File
@@ -0,0 +1,197 @@
diff -ruN a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -48,6 +48,12 @@
#define TRE_SPI_GO_CS GENMASK(10, 8)
#define TRE_SPI_GO_FRAG BIT(26)
+/* QSPI GO WD0 - flags field is 12 bits at [31:20] instead of SPI's 8 bits */
+#define TRE_QSPI_GO_FLAGS GENMASK(31, 20)
+
+/* QSPI Config0 WD0 - dummy clock count (QSPI reuses the SPI C0 dword layout, hence the TRE_SPI_ prefix) */
+#define TRE_SPI_C0_DUMMY_CLK GENMASK(21, 14)
+
/* GO WD2 */
#define TRE_RX_LEN GENMASK(23, 0)
@@ -1275,8 +1281,17 @@
upper_32_bits(ring->phys_addr));
gpi_write_reg(gpii, chan->ch_cntxt_db_reg + CNTXT_5_RING_RP_MSB - CNTXT_4_RING_RP_LSB,
upper_32_bits(ring->phys_addr));
- gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_SCRATCH_0_OFFS(id, chid),
- GPII_n_CH_k_SCRATCH_0(pair_chid, chan->protocol, chan->seid));
+ /*
+ * For QSPI, use the SE's native protocol value (9) in SCRATCH_0.
+ * The DT binding uses QCOM_GPI_QSPI=4 for channel selection, but
+ * the GSI firmware expects the actual SE protocol ID.
+ */
+ {
+ u32 hw_proto = chan->protocol == QCOM_GPI_QSPI ?
+ 9 : chan->protocol;
+ gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_SCRATCH_0_OFFS(id, chid),
+ GPII_n_CH_k_SCRATCH_0(pair_chid, hw_proto, chan->seid));
+ }
gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_SCRATCH_1_OFFS(id, chid), 0);
gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_SCRATCH_2_OFFS(id, chid), 0);
gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_SCRATCH_3_OFFS(id, chid), 0);
@@ -1699,6 +1714,116 @@
return tre_idx;
}
+static void qspi_add_config_tre(struct gpi_spi_config *spi,
+ struct gpi_tre *tre)
+{
+ tre->dword[0] = u32_encode_bits(spi->word_len, TRE_SPI_C0_WORD_SZ);
+ tre->dword[0] |= u32_encode_bits(spi->loopback_en, TRE_SPI_C0_LOOPBACK);
+ tre->dword[0] |= u32_encode_bits(spi->clock_pol_high, TRE_SPI_C0_CPOL);
+ tre->dword[0] |= u32_encode_bits(spi->data_pol_high, TRE_SPI_C0_CPHA);
+ tre->dword[0] |= u32_encode_bits(spi->pack_en, TRE_SPI_C0_TX_PACK);
+ tre->dword[0] |= u32_encode_bits(spi->pack_en, TRE_SPI_C0_RX_PACK);
+ tre->dword[0] |= u32_encode_bits(spi->dummy_clk_cnt, TRE_SPI_C0_DUMMY_CLK);
+
+ tre->dword[1] = 0;
+
+ tre->dword[2] = u32_encode_bits(spi->clk_div, TRE_C0_CLK_DIV);
+ tre->dword[2] |= u32_encode_bits(spi->clk_src, TRE_C0_CLK_SRC);
+
+ tre->dword[3] = u32_encode_bits(TRE_TYPE_CONFIG0, TRE_FLAGS_TYPE);
+ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
+}
+
+static int gpi_create_qspi_tre(struct gchan *chan, struct gpi_desc *desc,
+ struct scatterlist *sgl,
+ enum dma_transfer_direction direction)
+{
+ struct gpi_spi_config *spi = chan->config;
+ struct device *dev = chan->gpii->gpi_dev->dev;
+ unsigned int tre_idx = 0;
+ struct gpi_tre *tre;
+ u32 qspi_flags;
+ dma_addr_t address;
+ int len;
+
+ /* Config TRE (TX channel only) */
+ if (direction == DMA_MEM_TO_DEV && spi->set_config) {
+ qspi_add_config_tre(spi, &desc->tre[tre_idx]);
+ tre_idx++;
+ }
+
+ /* Go TRE (TX channel only) */
+ if (direction == DMA_MEM_TO_DEV) {
+ tre = &desc->tre[tre_idx];
+ tre_idx++;
+
+ qspi_flags = spi->qspi_lane_flags;
+ if (spi->fragmentation)
+ qspi_flags |= BIT(6);
+
+ tre->dword[0] = u32_encode_bits(qspi_flags, TRE_QSPI_GO_FLAGS);
+ tre->dword[0] |= u32_encode_bits(spi->cs, TRE_SPI_GO_CS);
+ tre->dword[0] |= u32_encode_bits(spi->cmd, TRE_SPI_GO_CMD);
+
+ tre->dword[1] = 0;
+ tre->dword[2] = u32_encode_bits(spi->rx_len, TRE_RX_LEN);
+
+ tre->dword[3] = u32_encode_bits(TRE_TYPE_GO, TRE_FLAGS_TYPE);
+ if (spi->cmd == SPI_RX) {
+ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOB);
+ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_LINK);
+ } else if (spi->cmd == SPI_TX) {
+ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
+ } else { /* SPI_TX_RX or SPI_DUPLEX */
+ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
+ if (spi->rx_len > 0)
+ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_LINK);
+ }
+ }
+
+ /* DMA TRE - skip for SPI_RX on TX channel */
+ if (direction == DMA_MEM_TO_DEV && spi->cmd == SPI_RX)
+ goto skip_dma_tre;
+
+ tre = &desc->tre[tre_idx];
+ tre_idx++;
+
+ address = sg_dma_address(sgl);
+ len = sg_dma_len(sgl);
+
+ /*
+ * For QSPI TX_RX, limit TX DMA to command bytes only.
+ * The SE parses the TX data as opcode+address and uses rx_len
+ * from the Go TRE for the receive phase. Sending more TX bytes
+ * than the command length confuses the SE's QSPI state machine.
+ */
+ if (direction == DMA_MEM_TO_DEV && spi->cmd == SPI_TX_RX &&
+ spi->tx_cmd_len > 0 && spi->tx_cmd_len < len)
+ len = spi->tx_cmd_len;
+
+ if (direction == DMA_MEM_TO_DEV && len <= 2 * sizeof(tre->dword[0])) {
+ tre->dword[0] = 0;
+ tre->dword[1] = 0;
+ memcpy(&tre->dword[0], sg_virt(sgl), len);
+ tre->dword[2] = u32_encode_bits(len, TRE_DMA_IMMEDIATE_LEN);
+ tre->dword[3] = u32_encode_bits(TRE_TYPE_IMMEDIATE_DMA, TRE_FLAGS_TYPE);
+ } else {
+ tre->dword[0] = lower_32_bits(address);
+ tre->dword[1] = upper_32_bits(address);
+ tre->dword[2] = u32_encode_bits(len, TRE_DMA_LEN);
+ tre->dword[3] = u32_encode_bits(TRE_TYPE_DMA, TRE_FLAGS_TYPE);
+ }
+ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOT);
+
+skip_dma_tre:
+ for (len = 0; len < tre_idx; len++)
+ dev_dbg(dev, "QSPI TRE:%d %x:%x:%x:%x\n", len,
+ desc->tre[len].dword[0], desc->tre[len].dword[1],
+ desc->tre[len].dword[2], desc->tre[len].dword[3]);
+
+ return tre_idx;
+}
+
static int gpi_create_spi_tre(struct gchan *chan, struct gpi_desc *desc,
struct scatterlist *sgl, enum dma_transfer_direction direction)
{
@@ -1841,7 +1966,9 @@
return NULL;
/* create TREs for xfer */
- if (gchan->protocol == QCOM_GPI_SPI) {
+ if (gchan->protocol == QCOM_GPI_QSPI) {
+ i = gpi_create_qspi_tre(gchan, gpi_desc, sgl, direction);
+ } else if (gchan->protocol == QCOM_GPI_SPI) {
i = gpi_create_spi_tre(gchan, gpi_desc, sgl, direction);
} else if (gchan->protocol == QCOM_GPI_I2C) {
i = gpi_create_i2c_tre(gchan, gpi_desc, sgl, direction, flags);
diff -ruN a/include/dt-bindings/dma/qcom-gpi.h b/include/dt-bindings/dma/qcom-gpi.h
--- a/include/dt-bindings/dma/qcom-gpi.h
+++ b/include/dt-bindings/dma/qcom-gpi.h
@@ -7,5 +7,6 @@
#define QCOM_GPI_SPI 1
#define QCOM_GPI_UART 2
#define QCOM_GPI_I2C 3
+#define QCOM_GPI_QSPI 4
#endif /* __DT_BINDINGS_DMA_QCOM_GPI_H__ */
diff -ruN a/include/linux/dma/qcom-gpi-dma.h b/include/linux/dma/qcom-gpi-dma.h
--- a/include/linux/dma/qcom-gpi-dma.h
+++ b/include/linux/dma/qcom-gpi-dma.h
@@ -13,6 +13,7 @@
SPI_TX = 1,
SPI_RX,
SPI_DUPLEX,
+ SPI_TX_RX = 7, /* QSPI TX-then-RX: TX opcode+addr phase, then clock in RX data */
};
/**
@@ -44,6 +45,11 @@
u32 clk_src;
enum spi_transfer_cmd cmd;
u32 rx_len;
+ /* QSPI extensions */
+ bool qspi_mode;
+ u16 qspi_lane_flags;
+ u8 dummy_clk_cnt;
+ u16 tx_cmd_len;
};
enum i2c_op {
@@ -0,0 +1,354 @@
diff -ruN a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -2,6 +2,7 @@
// Copyright (c) 2017-2018, The Linux foundation. All rights reserved.
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/qcom-gpi-dma.h>
@@ -75,9 +76,68 @@
#define GSI_CPHA BIT(4)
#define GSI_CPOL BIT(5)
+/* QSPI 1-4-4 support (added on top of the standard SPI controller). */
+#define QSPI_SE_PROTO 9
+
+#define GENI_IO_MUX_1_EN BIT(1)
+#define GENI_IO_MUX_2_EN BIT(2)
+#define GENI_IO_MUX_3_EN BIT(3)
+#define GENI_QSPI_IO_MUX_EN (GENI_IO_MUX_0_EN | GENI_IO_MUX_1_EN | \
+ GENI_IO_MUX_2_EN | GENI_IO_MUX_3_EN)
+
+#define SE_GSI_EVENT_EN 0xe18
+#define SE_IRQ_EN 0xe1c
+#define SE_DMA_TX_IRQ_CLR 0xc44
+#define SE_DMA_TX_IRQ_EN_SET 0xc4c
+#define SE_DMA_TX_IRQ_EN_CLR 0xc50
+#define SE_DMA_RX_IRQ_CLR 0xd44
+#define SE_DMA_RX_IRQ_EN_SET 0xd4c
+#define SE_DMA_RX_IRQ_EN_CLR 0xd50
+
+#define DMA_RX_EVENT_EN BIT(0)
+#define DMA_TX_EVENT_EN BIT(1)
+#define GENI_M_EVENT_EN BIT(2)
+#define GENI_S_EVENT_EN BIT(3)
+
+#define QSPI_M_IRQ_EN_GPI 0x33c00046
+#define QSPI_S_IRQ_EN_GPI 0x03001e06
+#define QSPI_DMA_TX_IRQ_EN 0x0d
+#define QSPI_DMA_RX_IRQ_EN 0x1d
+
+#define QSPI_SINGLE_SDR 0x000
+#define QSPI_QUAD_SDR BIT(9)
+
+/* Defaults for a quad read with 1-byte opcode + 3-byte address. */
+#define QSPI_DEFAULT_READ_OPCODE 0xEB
+#define QSPI_DEFAULT_DUMMY_CLK_CNT 8
+#define QSPI_DEFAULT_TX_CMD_LEN 4
+#define QSPI_DEFAULT_MAX_SPEED_HZ 20000000
+
+/**
+ * struct spi_geni_data - per-compatible behavioral flags
+ * @qspi_mode: controller runs in QSPI 1-4-4 mode with 4 data lanes
+ */
+struct spi_geni_data {
+ bool qspi_mode;
+};
+
+/**
+ * struct spi_geni_qspi_params - per-SE QSPI tunables read from DT
+ * @read_opcode: first TX byte that identifies a read transfer (e.g. 0xEB)
+ * @dummy_clk_cnt: dummy clocks inserted between address and read data
+ * @tx_cmd_len: number of TX bytes forming the read command (opcode+address)
+ */
+struct spi_geni_qspi_params {
+ u32 read_opcode;
+ u32 dummy_clk_cnt;
+ u32 tx_cmd_len;
+};
+
struct spi_geni_master {
struct geni_se se;
struct device *dev;
+ const struct spi_geni_data *data;
+ struct spi_geni_qspi_params qspi;
u32 tx_fifo_depth;
u32 fifo_width_bits;
u32 tx_wm;
@@ -104,6 +164,52 @@
int cur_xfer_mode;
};
+static inline bool spi_geni_is_qspi(const struct spi_geni_master *mas)
+{
+ return mas->data && mas->data->qspi_mode;
+}
+
+/* Enable all 4 data lanes on the GENI output mux for QSPI. */
+static void qspi_setup_io_mux(struct spi_geni_master *mas)
+{
+ struct geni_se *se = &mas->se;
+ u32 out;
+
+ out = readl(se->base + GENI_OUTPUT_CTRL);
+ out |= GENI_QSPI_IO_MUX_EN;
+ writel(out, se->base + GENI_OUTPUT_CTRL);
+}
+
+/*
+ * Reprogram the SE IRQ / DMA / event registers for GPI DMA.
+ *
+ * geni_se_resources_on() (called from runtime_resume) writes a fixed set of
+ * values into SE_IRQ_EN that are correct for FIFO/SE_DMA mode but clobber the
+ * GPI configuration. The QSPI SE_PROTO (9) also needs different masks than
+ * the defaults. Call this from prepare_message (post resume, pre transfer)
+ * to restore the GPI-friendly values.
+ */
+static void prep_se_for_gpi_dma(struct spi_geni_master *mas)
+{
+ void __iomem *base = mas->se.base;
+
+ writel(GENI_DMA_MODE_EN, base + SE_GENI_DMA_MODE_EN);
+ writel(0, base + SE_IRQ_EN);
+ writel(DMA_RX_EVENT_EN | DMA_TX_EVENT_EN |
+ GENI_M_EVENT_EN | GENI_S_EVENT_EN,
+ base + SE_GSI_EVENT_EN);
+ writel(QSPI_M_IRQ_EN_GPI, base + SE_GENI_M_IRQ_EN);
+ writel(QSPI_S_IRQ_EN_GPI, base + SE_GENI_S_IRQ_EN);
+ writel(0xf, base + SE_DMA_TX_IRQ_EN_CLR);
+ writel(QSPI_DMA_TX_IRQ_EN, base + SE_DMA_TX_IRQ_EN_SET);
+ writel(0xfff, base + SE_DMA_RX_IRQ_EN_CLR);
+ writel(QSPI_DMA_RX_IRQ_EN, base + SE_DMA_RX_IRQ_EN_SET);
+ writel(0xffc07fff, base + SE_GENI_M_IRQ_CLEAR);
+ writel(0x0fc07f3f, base + SE_GENI_S_IRQ_CLEAR);
+ writel(0xf, base + SE_DMA_TX_IRQ_CLR);
+ writel(0xfff, base + SE_DMA_RX_IRQ_CLR);
+}
+
static void spi_slv_setup(struct spi_geni_master *mas)
{
struct geni_se *se = &mas->se;
@@ -411,7 +517,20 @@
}
if (xfer->tx_buf && xfer->rx_buf) {
- peripheral.cmd = SPI_DUPLEX;
+ /*
+ * QSPI uses SPI_TX_RX (7) for TX-opcode-then-RX-data transfers;
+ * standard SPI uses SPI_DUPLEX (3) for true full-duplex.
+ *
+ * For SPI_TX_RX the GSI firmware needs an explicit rx_len so
+ * it knows how many bytes to clock in after the TX command
+ * phase. SPI_DUPLEX uses xfer->len implicitly.
+ */
+ if (spi_geni_is_qspi(mas)) {
+ peripheral.cmd = SPI_TX_RX;
+ peripheral.rx_len = (xfer->len << 3) / xfer->bits_per_word;
+ } else {
+ peripheral.cmd = SPI_DUPLEX;
+ }
} else if (xfer->tx_buf) {
peripheral.cmd = SPI_TX;
peripheral.rx_len = 0;
@@ -445,6 +564,44 @@
peripheral.fragmentation = FRAGMENTATION;
}
+ if (spi_geni_is_qspi(mas)) {
+ bool multi = !list_is_singular(&spi->cur_msg->transfers);
+
+ peripheral.qspi_mode = true;
+
+ /*
+ * In a multi-transfer message the first write uses SINGLE_SDR
+ * (opcode+addr lane transition) while subsequent TX-only
+ * transfers stay in QUAD.
+ */
+ if (multi && xfer->tx_buf && !xfer->rx_buf &&
+ &xfer->transfer_list == spi->cur_msg->transfers.next)
+ peripheral.qspi_lane_flags = QSPI_SINGLE_SDR;
+ else
+ peripheral.qspi_lane_flags = QSPI_QUAD_SDR;
+
+ if (peripheral.cmd == SPI_TX_RX && xfer->tx_buf) {
+ const u8 *tx = xfer->tx_buf;
+
+ /*
+ * The configured read opcode is followed by an address
+ * then dummy clocks before the device drives the data
+ * lanes. Any other first byte means the host is issuing
+ * a write command, so downgrade to TX-only.
+ */
+ if (tx[0] == mas->qspi.read_opcode) {
+ peripheral.dummy_clk_cnt = mas->qspi.dummy_clk_cnt;
+ peripheral.tx_cmd_len = mas->qspi.tx_cmd_len;
+ } else {
+ peripheral.cmd = SPI_TX;
+ peripheral.dummy_clk_cnt = 0;
+ peripheral.rx_len = 0;
+ }
+ } else if (peripheral.cmd == SPI_RX) {
+ peripheral.dummy_clk_cnt = mas->qspi.dummy_clk_cnt;
+ }
+ }
+
if (peripheral.cmd & SPI_RX) {
dmaengine_slave_config(mas->rx, &config);
rx_desc = dmaengine_prep_slave_sg(mas->rx, xfer->rx_sg.sgl, xfer->rx_sg.nents,
@@ -467,8 +624,18 @@
return -EIO;
}
- tx_desc->callback_result = spi_gsi_callback_result;
- tx_desc->callback_param = spi;
+ /*
+ * In QSPI mode the Go TRE on the TX channel has no DMA TRE for SPI_RX
+ * (and for TX_RX completes TX-side early), so the real completion
+ * event is the IEOT on the RX channel. Attach the callback there.
+ */
+ if (spi_geni_is_qspi(mas) && (peripheral.cmd & SPI_RX)) {
+ rx_desc->callback_result = spi_gsi_callback_result;
+ rx_desc->callback_param = spi;
+ } else {
+ tx_desc->callback_result = spi_gsi_callback_result;
+ tx_desc->callback_param = spi;
+ }
if (peripheral.cmd & SPI_RX)
dmaengine_submit(rx_desc);
@@ -534,7 +701,13 @@
return ret;
case GENI_GPI_DMA:
- /* nothing to do for GPI DMA */
+ /*
+ * In QSPI mode, runtime_resume's call to geni_se_resources_on()
+ * clobbers the GPI-specific IRQ/DMA register layout. Restore
+ * it before every message.
+ */
+ if (spi_geni_is_qspi(mas))
+ prep_se_for_gpi_dma(mas);
return 0;
}
@@ -609,6 +782,18 @@
goto out_pm;
}
spi_slv_setup(mas);
+ } else if (spi_geni_is_qspi(mas)) {
+ /*
+ * QSPI SEs report protocol 9 in hardware. The GENI_SE_SPI
+ * firmware loader cannot be used here (no firmware for proto 9
+ * is shipped), so reject anything else.
+ */
+ if (proto != QSPI_SE_PROTO) {
+ dev_err(mas->dev, "Expected QSPI proto %d, got %d\n",
+ QSPI_SE_PROTO, proto);
+ goto out_pm;
+ }
+ qspi_setup_io_mux(mas);
} else if (proto == GENI_SE_INVALID_PROTO) {
ret = geni_load_se_firmware(se, GENI_SE_SPI);
if (ret) {
@@ -640,9 +825,28 @@
else
mas->oversampling = 1;
+ /*
+ * QSPI SEs cannot use FIFO mode (the FIFO path would try to drive
+ * a single-lane word format on a 4-lane QSPI bus), so force GPI DMA
+ * regardless of what GENI_IF_DISABLE_RO reports.
+ */
fifo_disable = readl(se->base + GENI_IF_DISABLE_RO) & FIFO_IF_DISABLE;
+ if (spi_geni_is_qspi(mas))
+ fifo_disable = 1;
+
switch (fifo_disable) {
case 1:
+ /*
+ * For QSPI, bring the SE up in SE_DMA first (which arms the
+ * DMA-related registers) and let it settle before switching
+ * to GPI_DMA and grabbing the GPII channels. Without this
+ * intermediate step the first GPI command after probe can
+ * hang on the CH STOP completion.
+ */
+ if (spi_geni_is_qspi(mas)) {
+ geni_se_select_mode(se, GENI_SE_DMA);
+ msleep(10);
+ }
ret = spi_geni_grab_gpi_chan(mas);
if (!ret) { /* success case */
mas->cur_xfer_mode = GENI_GPI_DMA;
@@ -653,6 +857,16 @@
goto out_pm;
}
/*
+ * For QSPI there is no usable FIFO fallback: FIFO mode cannot
+ * drive a 4-lane QSPI bus. Fail the probe instead of silently
+ * producing garbage.
+ */
+ if (spi_geni_is_qspi(mas)) {
+ dev_err(mas->dev, "Failed to grab GPI DMA channels for QSPI: %d\n",
+ ret);
+ goto out_pm;
+ }
+ /*
* in case of failure to get gpi dma channel, we can still do the
* FIFO mode, so fallthrough
*/
@@ -1052,11 +1266,24 @@
mas = spi_controller_get_devdata(spi);
mas->irq = irq;
mas->dev = dev;
+ mas->data = device_get_match_data(dev);
mas->se.dev = dev;
mas->se.wrapper = dev_get_drvdata(dev->parent);
mas->se.base = base;
mas->se.clk = clk;
+ if (spi_geni_is_qspi(mas)) {
+ mas->qspi.read_opcode = QSPI_DEFAULT_READ_OPCODE;
+ mas->qspi.dummy_clk_cnt = QSPI_DEFAULT_DUMMY_CLK_CNT;
+ mas->qspi.tx_cmd_len = QSPI_DEFAULT_TX_CMD_LEN;
+ device_property_read_u32(dev, "qcom,qspi-read-opcode",
+ &mas->qspi.read_opcode);
+ device_property_read_u32(dev, "qcom,qspi-read-dummy-clocks",
+ &mas->qspi.dummy_clk_cnt);
+ device_property_read_u32(dev, "qcom,qspi-read-cmd-bytes",
+ &mas->qspi.tx_cmd_len);
+ }
+
ret = devm_pm_opp_set_clkname(&pdev->dev, "se");
if (ret)
return ret;
@@ -1069,9 +1296,12 @@
spi->bus_num = -1;
spi->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_CS_HIGH;
+ if (spi_geni_is_qspi(mas))
+ spi->mode_bits |= SPI_TX_QUAD | SPI_RX_QUAD;
spi->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
spi->num_chipselect = 4;
- spi->max_speed_hz = 50000000;
+ spi->max_speed_hz = spi_geni_is_qspi(mas) ? QSPI_DEFAULT_MAX_SPEED_HZ
+ : 50000000;
spi->max_dma_len = 0xffff0; /* 24 bits for tx/rx dma length */
spi->prepare_message = spi_geni_prepare_message;
spi->transfer_one = spi_geni_transfer_one;
@@ -1197,8 +1427,13 @@
SET_SYSTEM_SLEEP_PM_OPS(spi_geni_suspend, spi_geni_resume)
};
+static const struct spi_geni_data spi_geni_qspi_data = {
+ .qspi_mode = true,
+};
+
static const struct of_device_id spi_geni_dt_match[] = {
{ .compatible = "qcom,geni-spi" },
+ { .compatible = "qcom,geni-spi-qspi", .data = &spi_geni_qspi_data },
{}
};
MODULE_DEVICE_TABLE(of, spi_geni_dt_match);