From patchwork Tue Jan 23 13:26:10 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: [01/44] drm/bridge: sii902x: Fix audio codec unregistration From: Imre Deak X-Patchwork-Id: 575580 Message-Id: <20240123132653.413364-2-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:10 +0200 From: Tomi Valkeinen The driver never unregisters the audio codec platform device, which can lead to a crash on module reloading, nor does it handle the return value from sii902x_audio_codec_init(). Signed-off-by: Tomi Valkeinen Fixes: ff5781634c41 ("drm/bridge: sii902x: Implement HDMI audio support") Cc: Jyri Sarha Reviewed-by: Aradhya Bhatia Acked-by: Linus Walleij Signed-off-by: Robert Foss Link: https://patchwork.freedesktop.org/patch/msgid/20240103-si902x-fixes-v1-2-b9fd3e448411@ideasonboard.com --- drivers/gpu/drm/bridge/sii902x.c | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c index 69da73e414a9a..4560ae9cbce15 100644 --- a/drivers/gpu/drm/bridge/sii902x.c +++ b/drivers/gpu/drm/bridge/sii902x.c @@ -1080,7 +1080,9 @@ static int sii902x_init(struct sii902x *sii902x) return ret; } - sii902x_audio_codec_init(sii902x, dev); + ret = sii902x_audio_codec_init(sii902x, dev); + if (ret) + return ret; i2c_set_clientdata(sii902x->i2c, sii902x); @@ -1088,13 +1090,15 @@ static int sii902x_init(struct sii902x *sii902x) 1, 0, I2C_MUX_GATE, sii902x_i2c_bypass_select, sii902x_i2c_bypass_deselect); - if (!sii902x->i2cmux) - return -ENOMEM; + if (!sii902x->i2cmux) { + ret = -ENOMEM; + goto err_unreg_audio; + } sii902x->i2cmux->priv = sii902x; ret = i2c_mux_add_adapter(sii902x->i2cmux, 0, 0, 0); if (ret) - return ret; + goto err_unreg_audio; sii902x->bridge.funcs = &sii902x_bridge_funcs; sii902x->bridge.of_node = dev->of_node; @@ -1107,6 +1111,12 @@ static int sii902x_init(struct sii902x *sii902x) drm_bridge_add(&sii902x->bridge); return 0; + +err_unreg_audio: + if (!PTR_ERR_OR_ZERO(sii902x->audio.pdev)) + platform_device_unregister(sii902x->audio.pdev); + + return ret; } static int sii902x_probe(struct i2c_client *client) @@ -1179,6 +1189,9 @@ static void sii902x_remove(struct i2c_client *client) drm_bridge_remove(&sii902x->bridge); i2c_mux_del_adapters(sii902x->i2cmux); + + if (!PTR_ERR_OR_ZERO(sii902x->audio.pdev)) + platform_device_unregister(sii902x->audio.pdev); } static const struct of_device_id sii902x_dt_ids[] = { From patchwork Tue Jan 23 13:26:11 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: [02/44] drm-tip: 2024y-01m-23d-11h-59m-12s UTC integration manifest From: Imre Deak X-Patchwork-Id: 575579 Message-Id: <20240123132653.413364-3-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:11 +0200 From: Robert Foss --- integration-manifest | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100644 integration-manifest diff --git a/integration-manifest b/integration-manifest new file mode 100644 index 0000000000000..4f5c736c1639a --- /dev/null +++ b/integration-manifest @@ -0,0 +1,36 @@ +drm drm-fixes 6613476e225e090cc9aad49be7fa504e290dd33d + Linux 6.8-rc1 +drm-misc drm-misc-fixes 1a84c213146a06aca1fd0e5b376ab7d36d15e1b3 + drm/dp_mst: Separate @failing_port list in drm_dp_mst_atomic_check_mgr() comment +drm-intel drm-intel-fixes 6613476e225e090cc9aad49be7fa504e290dd33d + Linux 6.8-rc1 
+drm-xe drm-xe-fixes 6613476e225e090cc9aad49be7fa504e290dd33d + Linux 6.8-rc1 +drm drm-next 6613476e225e090cc9aad49be7fa504e290dd33d + Linux 6.8-rc1 +drm-misc drm-misc-next-fixes 1f1626ac0428820f998245478610f452650bcab5 + drm/ttm: fix ttm pool initialization for no-dma-device drivers +drm-intel drm-intel-next-fixes 84b5ece64477df4394d362d494a2496bf0878985 + drm/i915: Drop -Wstringop-overflow +drm-xe drm-xe-next-fixes bf3ff145df184698a8a80b33265064638572366f + drm/xe: display support should not depend on EXPERT +drm-misc drm-misc-next bc77bde2d3f078c038f69c65387dca6fe0faacbd + drm/bridge: sii902x: Fix audio codec unregistration +drm-intel drm-intel-next ba407525f8247ee4c270369f3371b9994c27bfda + drm/i915: Try to preserve the current shared_dpll for fastset on type-c ports +drm-intel drm-intel-gt-next c44d4ef47fdad0a33966de89f9064e19736bb52f + drm/i915/xelpg: Extend some workarounds/tuning to gfx version 12.74 +drm-xe drm-xe-next be3382ecdf317f005e7d47356d0a9256cc36dd88 + Merge drm/drm-next into drm-xe-next +drm-intel topic/core-for-CI 240520520b9051b237e59e8746ac08ccf9c0fa27 + HAX net/phy: Suppress WARN from phy_error +drm topic/nouveau-misc dfc4005f8c172eea359f9db08c3b2b0ff0153699 + drm/nouveau/disp: move DAC load detection method +drm topic/drm-ci ad6bfe1b66a5c146ec236847eca7af4c8806d666 + drm: ci: docs: fix build warning - add missing escape +drm topic/vmemdup-user-array 06ab64a0d836ac430c5f94669710a78aa43942cb + drm: vmwgfx_surface.c: copy user-array safely +drm topic/nvidia-gsp 8d55b0a940bb10592ffaad68d14314823ddf4cdf + nouveau/gsp: add some basic registry entries. +drm-xe topic/xe-for-CI e4cfe0cd981952f6ed3d5b92a0b7e3ae0b3ad2b4 + drm/xe/guc: define LNL FW From patchwork Tue Jan 23 13:26:12 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: [03/44] thunderbolt: Introduce tb_port_reset() From: Imre Deak X-Patchwork-Id: 575594 Message-Id: <20240123132653.413364-4-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:12 +0200 From: Sanath S Introduce a function that issues Downstream Port Reset to a USB4 port. This supports Thunderbolt 2, 3 and USB4 routers. Signed-off-by: Sanath S Signed-off-by: Mika Westerberg --- drivers/thunderbolt/lc.c | 45 +++++++++++++++++++++++++++++++++++ drivers/thunderbolt/switch.c | 7 ++++++ drivers/thunderbolt/tb.h | 2 ++ drivers/thunderbolt/tb_regs.h | 4 ++++ drivers/thunderbolt/usb4.c | 39 ++++++++++++++++++++++++++++++ 5 files changed, 97 insertions(+) diff --git a/drivers/thunderbolt/lc.c b/drivers/thunderbolt/lc.c index 633970fbe9b05..63cb4b6afb718 100644 --- a/drivers/thunderbolt/lc.c +++ b/drivers/thunderbolt/lc.c @@ -6,6 +6,8 @@ * Author: Mika Westerberg */ +#include + #include "tb.h" /** @@ -45,6 +47,49 @@ static int find_port_lc_cap(struct tb_port *port) return sw->cap_lc + start + phys * size; } +/** + * tb_lc_reset_port() - Trigger downstream port reset through LC + * @port: Port that is reset + * + * Triggers downstream port reset through link controller registers. + * Returns %0 in case of success negative errno otherwise. Only supports + * non-USB4 routers with link controller (that's Thunderbolt 2 and + * Thunderbolt 3). 
+ */ +int tb_lc_reset_port(struct tb_port *port) +{ + struct tb_switch *sw = port->sw; + int cap, ret; + u32 mode; + + if (sw->generation < 2) + return -EINVAL; + + cap = find_port_lc_cap(port); + if (cap < 0) + return cap; + + ret = tb_sw_read(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1); + if (ret) + return ret; + + mode |= TB_LC_PORT_MODE_DPR; + + ret = tb_sw_write(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1); + if (ret) + return ret; + + fsleep(10000); + + ret = tb_sw_read(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1); + if (ret) + return ret; + + mode &= ~TB_LC_PORT_MODE_DPR; + + return tb_sw_write(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1); +} + static int tb_lc_set_port_configured(struct tb_port *port, bool configured) { bool upstream = tb_is_upstream_port(port); diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index 900114ba4371b..b0e69d4313cea 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -676,6 +676,13 @@ int tb_port_disable(struct tb_port *port) return __tb_port_enable(port, false); } +static int tb_port_reset(struct tb_port *port) +{ + if (tb_switch_is_usb4(port->sw)) + return port->cap_usb4 ? usb4_port_reset(port) : 0; + return tb_lc_reset_port(port); +} + /* * tb_init_port() - initialize a port * diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h index 997c5a5369052..c38b047ba14be 100644 --- a/drivers/thunderbolt/tb.h +++ b/drivers/thunderbolt/tb.h @@ -1169,6 +1169,7 @@ int tb_drom_read(struct tb_switch *sw); int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid); int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid); +int tb_lc_reset_port(struct tb_port *port); int tb_lc_configure_port(struct tb_port *port); void tb_lc_unconfigure_port(struct tb_port *port); int tb_lc_configure_xdomain(struct tb_port *port); @@ -1301,6 +1302,7 @@ void usb4_switch_remove_ports(struct tb_switch *sw); int usb4_port_unlock(struct tb_port *port); int usb4_port_hotplug_enable(struct tb_port *port); +int usb4_port_reset(struct tb_port *port); int usb4_port_configure(struct tb_port *port); void usb4_port_unconfigure(struct tb_port *port); int usb4_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd); diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h index 87e4795275fe6..efcae298b3705 100644 --- a/drivers/thunderbolt/tb_regs.h +++ b/drivers/thunderbolt/tb_regs.h @@ -389,6 +389,7 @@ struct tb_regs_port_header { #define PORT_CS_18_CSA BIT(22) #define PORT_CS_18_TIP BIT(24) #define PORT_CS_19 0x13 +#define PORT_CS_19_DPR BIT(0) #define PORT_CS_19_PC BIT(3) #define PORT_CS_19_PID BIT(4) #define PORT_CS_19_WOC BIT(16) @@ -584,6 +585,9 @@ struct tb_regs_hop { #define TB_LC_POWER 0x740 /* Link controller registers */ +#define TB_LC_PORT_MODE 0x26 +#define TB_LC_PORT_MODE_DPR BIT(0) + #define TB_LC_CS_42 0x2a #define TB_LC_CS_42_USB_PLUGGED BIT(31) diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c index f8f0d24ff6e46..4b35898aa2166 100644 --- a/drivers/thunderbolt/usb4.c +++ b/drivers/thunderbolt/usb4.c @@ -1113,6 +1113,45 @@ int usb4_port_hotplug_enable(struct tb_port *port) return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_5, 1); } +/** + * usb4_port_reset() - Issue downstream port reset + * @port: USB4 port to reset + * + * Issues downstream port reset to @port. 
+ */ +int usb4_port_reset(struct tb_port *port) +{ + int ret; + u32 val; + + if (!port->cap_usb4) + return -EINVAL; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_19, 1); + if (ret) + return ret; + + val |= PORT_CS_19_DPR; + + ret = tb_port_write(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_19, 1); + if (ret) + return ret; + + fsleep(10000); + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_19, 1); + if (ret) + return ret; + + val &= ~PORT_CS_19_DPR; + + return tb_port_write(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_19, 1); +} + static int usb4_port_set_configured(struct tb_port *port, bool configured) { int ret; From patchwork Tue Jan 23 13:26:13 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: [04/44] thunderbolt: Introduce tb_path_deactivate_hop() From: Imre Deak X-Patchwork-Id: 575581 Message-Id: <20240123132653.413364-5-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:13 +0200 From: Sanath S This function can be used to clear path config space of an adapter. Make it available for other files in this driver. Signed-off-by: Sanath S Signed-off-by: Mika Westerberg --- drivers/thunderbolt/path.c | 13 +++++++++++++ drivers/thunderbolt/tb.h | 1 + 2 files changed, 14 insertions(+) diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c index 091a81bbdbdc9..f760e54cd9bd1 100644 --- a/drivers/thunderbolt/path.c +++ b/drivers/thunderbolt/path.c @@ -446,6 +446,19 @@ static int __tb_path_deactivate_hop(struct tb_port *port, int hop_index, return -ETIMEDOUT; } +/** + * tb_path_deactivate_hop() - Deactivate one path in path config space + * @port: Lane or protocol adapter + * @hop_index: HopID of the path to be cleared + * + * This deactivates or clears a single path config space entry at + * @hop_index. Returns %0 in success and negative errno otherwise. + */ +int tb_path_deactivate_hop(struct tb_port *port, int hop_index) +{ + return __tb_path_deactivate_hop(port, hop_index, true); +} + static void __tb_path_deactivate_hops(struct tb_path *path, int first_hop) { int i, res; diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h index c38b047ba14be..2c689e3b02b94 100644 --- a/drivers/thunderbolt/tb.h +++ b/drivers/thunderbolt/tb.h @@ -1150,6 +1150,7 @@ struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid, void tb_path_free(struct tb_path *path); int tb_path_activate(struct tb_path *path); void tb_path_deactivate(struct tb_path *path); +int tb_path_deactivate_hop(struct tb_port *port, int hop_index); bool tb_path_is_invalid(struct tb_path *path); bool tb_path_port_on_path(const struct tb_path *path, const struct tb_port *port); From patchwork Tue Jan 23 13:26:14 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: [05/44] thunderbolt: Make tb_switch_reset() support Thunderbolt 2, 3 and USB4 routers From: Imre Deak X-Patchwork-Id: 575583 Message-Id: <20240123132653.413364-6-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:14 +0200 From: Sanath S Currently tb_switch_reset() only did something for Thunderbolt 1 devices. Expand this to support all generations, including USB4, and both host and device routers. 
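For orientation, the host-router leg of the new reset flow can be condensed as below. This is only a sketch distilled from the diff that follows (error handling and the Thunderbolt 1 fallback omitted), not the verbatim upstream code.

    /*
     * Sketch: per-port handling in tb_switch_reset_host(), condensed
     * from the diff below; return-value checks omitted for brevity.
     */
    tb_switch_for_each_port(sw, port) {
            if (tb_port_is_null(port) && !tb_is_upstream_port(port))
                    tb_port_reset(port);              /* lane adapter: downstream port reset */
            else if (tb_port_is_usb3_down(port) || tb_port_is_usb3_up(port))
                    tb_usb3_port_enable(port, false); /* protocol adapters: disable the path */
            else if (tb_port_is_dpin(port) || tb_port_is_dpout(port))
                    tb_dp_port_enable(port, false);
            else if (tb_port_is_pcie_down(port) || tb_port_is_pcie_up(port))
                    tb_pci_port_enable(port, false);
            else
                    continue;

            /* Then clear the adapter's path config space entries one by one. */
            for (i = TB_PATH_MIN_HOPID; i <= port->config.max_in_hop_id; i++)
                    tb_path_deactivate_hop(port, i);
    }

Device routers simply get a downstream port reset through the parent router, as the diff below shows.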
Signed-off-by: Sanath S Signed-off-by: Mika Westerberg --- drivers/thunderbolt/switch.c | 123 ++++++++++++++++++++++++++++++---- drivers/thunderbolt/tb_regs.h | 2 + 2 files changed, 111 insertions(+), 14 deletions(-) diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index b0e69d4313cea..e7bda8729c7e2 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -1538,29 +1538,124 @@ static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw) regs->__unknown1, regs->__unknown4); } +static int tb_switch_reset_host(struct tb_switch *sw) +{ + if (sw->generation > 1) { + struct tb_port *port; + + tb_switch_for_each_port(sw, port) { + int i, ret; + + /* + * For lane adapters we issue downstream port + * reset and clear up path config spaces. + * + * For protocol adapters we disable the path and + * clear path config space one by one (from 8 to + * Max Input HopID of the adapter). + */ + if (tb_port_is_null(port) && !tb_is_upstream_port(port)) { + ret = tb_port_reset(port); + if (ret) + return ret; + } else if (tb_port_is_usb3_down(port) || + tb_port_is_usb3_up(port)) { + tb_usb3_port_enable(port, false); + } else if (tb_port_is_dpin(port) || + tb_port_is_dpout(port)) { + tb_dp_port_enable(port, false); + } else if (tb_port_is_pcie_down(port) || + tb_port_is_pcie_up(port)) { + tb_pci_port_enable(port, false); + } else { + continue; + } + + /* Cleanup path config space of protocol adapter */ + for (i = TB_PATH_MIN_HOPID; + i <= port->config.max_in_hop_id; i++) { + ret = tb_path_deactivate_hop(port, i); + if (ret) + return ret; + } + } + } else { + struct tb_cfg_result res; + + /* Thunderbolt 1 uses the "reset" config space packet */ + res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2, + TB_CFG_SWITCH, 2, 2); + if (res.err) + return res.err; + res = tb_cfg_reset(sw->tb->ctl, tb_route(sw)); + if (res.err > 0) + return -EIO; + else if (res.err < 0) + return res.err; + } + + return 0; +} + +static int tb_switch_reset_device(struct tb_switch *sw) +{ + return tb_port_reset(tb_switch_downstream_port(sw)); +} + +static bool tb_switch_enumerated(struct tb_switch *sw) +{ + u32 val; + int ret; + + /* + * Read directly from the hardware because we use this also + * during system sleep where sw->config.enabled is already set + * by us. + */ + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_3, 1); + if (ret) + return false; + + return !!(val & ROUTER_CS_3_V); +} + /** - * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET - * @sw: Switch to reset + * tb_switch_reset() - Perform reset to the router + * @sw: Router to reset * - * Return: Returns 0 on success or an error code on failure. + * Issues reset to the router @sw. Can be used for any router. For host + * routers, resets all the downstream ports and cleans up path config + * spaces accordingly. For device routers issues downstream port reset + * through the parent router, so as side effect there will be unplug + * soon after this is finished. + * + * If the router is not enumerated does nothing. + * + * Returns %0 on success or negative errno in case of failure. */ int tb_switch_reset(struct tb_switch *sw) { - struct tb_cfg_result res; + int ret; - if (sw->generation > 1) + /* + * We cannot access the port config spaces unless the router is + * already enumerated. If the router is not enumerated it is + * equal to being reset so we can skip that here. 
+ */ + if (!tb_switch_enumerated(sw)) return 0; - tb_sw_dbg(sw, "resetting switch\n"); + tb_sw_dbg(sw, "resetting\n"); - res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2, - TB_CFG_SWITCH, 2, 2); - if (res.err) - return res.err; - res = tb_cfg_reset(sw->tb->ctl, tb_route(sw)); - if (res.err > 0) - return -EIO; - return res.err; + if (tb_route(sw)) + ret = tb_switch_reset_device(sw); + else + ret = tb_switch_reset_host(sw); + + if (ret) + tb_sw_warn(sw, "failed to reset\n"); + + return ret; } /** diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h index efcae298b3705..1716babcbbd4a 100644 --- a/drivers/thunderbolt/tb_regs.h +++ b/drivers/thunderbolt/tb_regs.h @@ -194,6 +194,8 @@ struct tb_regs_switch_header { #define USB4_VERSION_MAJOR_MASK GENMASK(7, 5) #define ROUTER_CS_1 0x01 +#define ROUTER_CS_3 0x03 +#define ROUTER_CS_3_V BIT(31) #define ROUTER_CS_4 0x04 /* Used with the router cmuv field */ #define ROUTER_CS_4_CMUV_V1 0x10 From patchwork Tue Jan 23 13:26:15 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: [06/44] thunderbolt: Reset topology created by the boot firmware From: Imre Deak X-Patchwork-Id: 575582 Message-Id: <20240123132653.413364-7-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:15 +0200 From: Sanath S Boot firmware (typically BIOS) might have created tunnels of its own. The tunnel configuration that it does might be sub-optimal. For instance it may only support HBR2 monitors so the DisplayPort tunnels it created may limit Linux graphics drivers. In addition there is an issue on some AMD based systems where the BIOS does not allocate enough PCIe resources for future topology extension. By resetting the USB4 topology the PCIe links will be reset as well allowing Linux to re-allocate. This aligns the behavior with Windows Connection Manager. We already issued host router reset for USB4 v2 routers, now extend it to USB4 v1 routers as well. For pre-USB4 (that's Apple systems) we leave it as is and continue to discover the existing tunnels. Suggested-by: Mario Limonciello Signed-off-by: Sanath S Signed-off-by: Mika Westerberg --- drivers/thunderbolt/domain.c | 5 +++-- drivers/thunderbolt/icm.c | 2 +- drivers/thunderbolt/nhi.c | 19 +++++++++++++------ drivers/thunderbolt/tb.c | 26 +++++++++++++++++++------- drivers/thunderbolt/tb.h | 4 ++-- 5 files changed, 38 insertions(+), 18 deletions(-) diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c index 9fb1a64f3300b..df0d845e069ac 100644 --- a/drivers/thunderbolt/domain.c +++ b/drivers/thunderbolt/domain.c @@ -423,6 +423,7 @@ struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize /** * tb_domain_add() - Add domain to the system * @tb: Domain to add + * @reset: Issue reset to the host router * * Starts the domain and adds it to the system. Hotplugging devices will * work after this has been returned successfully. 
In order to remove @@ -431,7 +432,7 @@ struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize * * Return: %0 in case of success and negative errno in case of error */ -int tb_domain_add(struct tb *tb) +int tb_domain_add(struct tb *tb, bool reset) { int ret; @@ -460,7 +461,7 @@ int tb_domain_add(struct tb *tb) /* Start the domain */ if (tb->cm_ops->start) { - ret = tb->cm_ops->start(tb); + ret = tb->cm_ops->start(tb, reset); if (ret) goto err_domain_del; } diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c index 56790d50f9e32..baf10d099c778 100644 --- a/drivers/thunderbolt/icm.c +++ b/drivers/thunderbolt/icm.c @@ -2144,7 +2144,7 @@ static int icm_runtime_resume(struct tb *tb) return 0; } -static int icm_start(struct tb *tb) +static int icm_start(struct tb *tb, bool not_used) { struct icm *icm = tb_priv(tb); int ret; diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c index fb4f46e51753a..b22023fae60de 100644 --- a/drivers/thunderbolt/nhi.c +++ b/drivers/thunderbolt/nhi.c @@ -1221,7 +1221,7 @@ static void nhi_check_iommu(struct tb_nhi *nhi) str_enabled_disabled(port_ok)); } -static void nhi_reset(struct tb_nhi *nhi) +static bool nhi_reset(struct tb_nhi *nhi) { ktime_t timeout; u32 val; @@ -1229,11 +1229,11 @@ static void nhi_reset(struct tb_nhi *nhi) val = ioread32(nhi->iobase + REG_CAPS); /* Reset only v2 and later routers */ if (FIELD_GET(REG_CAPS_VERSION_MASK, val) < REG_CAPS_VERSION_2) - return; + return false; if (!host_reset) { dev_dbg(&nhi->pdev->dev, "skipping host router reset\n"); - return; + return false; } iowrite32(REG_RESET_HRR, nhi->iobase + REG_RESET); @@ -1244,12 +1244,14 @@ static void nhi_reset(struct tb_nhi *nhi) val = ioread32(nhi->iobase + REG_RESET); if (!(val & REG_RESET_HRR)) { dev_warn(&nhi->pdev->dev, "host router reset successful\n"); - return; + return true; } usleep_range(10, 20); } while (ktime_before(ktime_get(), timeout)); dev_warn(&nhi->pdev->dev, "timeout resetting host router\n"); + + return false; } static int nhi_init_msi(struct tb_nhi *nhi) @@ -1331,6 +1333,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id) struct device *dev = &pdev->dev; struct tb_nhi *nhi; struct tb *tb; + bool reset; int res; if (!nhi_imr_valid(pdev)) @@ -1365,7 +1368,11 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id) nhi_check_quirks(nhi); nhi_check_iommu(nhi); - nhi_reset(nhi); + /* + * Only USB4 v2 hosts support host reset so if we already did + * that then don't do it again when the domain is initialized. + */ + reset = nhi_reset(nhi) ? 
false : host_reset; res = nhi_init_msi(nhi); if (res) @@ -1392,7 +1399,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id) dev_dbg(dev, "NHI initialized, starting thunderbolt\n"); - res = tb_domain_add(tb); + res = tb_domain_add(tb, reset); if (res) { /* * At this point the RX/TX rings might already have been diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c index 846d2813bb1a5..9a261560d0f4c 100644 --- a/drivers/thunderbolt/tb.c +++ b/drivers/thunderbolt/tb.c @@ -2581,7 +2581,7 @@ static int tb_scan_finalize_switch(struct device *dev, void *data) return 0; } -static int tb_start(struct tb *tb) +static int tb_start(struct tb *tb, bool reset) { struct tb_cm *tcm = tb_priv(tb); int ret; @@ -2622,12 +2622,24 @@ static int tb_start(struct tb *tb) tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_MODE_LOWRES); /* Enable TMU if it is off */ tb_switch_tmu_enable(tb->root_switch); - /* Full scan to discover devices added before the driver was loaded. */ - tb_scan_switch(tb->root_switch); - /* Find out tunnels created by the boot firmware */ - tb_discover_tunnels(tb); - /* Add DP resources from the DP tunnels created by the boot firmware */ - tb_discover_dp_resources(tb); + + /* + * Boot firmware might have created tunnels of its own. Since we + * cannot be sure they are usable for us, tear them down and + * reset the ports to handle it as new hotplug for USB4 v1 + * routers (for USB4 v2 and beyond we already do host reset). + */ + if (reset && usb4_switch_version(tb->root_switch) == 1) { + tb_switch_reset(tb->root_switch); + } else { + /* Full scan to discover devices added before the driver was loaded. */ + tb_scan_switch(tb->root_switch); + /* Find out tunnels created by the boot firmware */ + tb_discover_tunnels(tb); + /* Add DP resources from the DP tunnels created by the boot firmware */ + tb_discover_dp_resources(tb); + } + /* * If the boot firmware did not create USB 3.x tunnels create them * now for the whole topology. diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h index 2c689e3b02b94..d0dfbf040356d 100644 --- a/drivers/thunderbolt/tb.h +++ b/drivers/thunderbolt/tb.h @@ -483,7 +483,7 @@ struct tb_path { */ struct tb_cm_ops { int (*driver_ready)(struct tb *tb); - int (*start)(struct tb *tb); + int (*start)(struct tb *tb, bool reset); void (*stop)(struct tb *tb); int (*suspend_noirq)(struct tb *tb); int (*resume_noirq)(struct tb *tb); @@ -746,7 +746,7 @@ int tb_xdomain_init(void); void tb_xdomain_exit(void); struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize); -int tb_domain_add(struct tb *tb); +int tb_domain_add(struct tb *tb, bool reset); void tb_domain_remove(struct tb *tb); int tb_domain_suspend_noirq(struct tb *tb); int tb_domain_resume_noirq(struct tb *tb); From patchwork Tue Jan 23 13:26:16 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: [07/44] thunderbolt: Fix XDomain rx_lanes_show and tx_lanes_show From: Imre Deak X-Patchwork-Id: 575596 Message-Id: <20240123132653.413364-8-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:16 +0200 From: Mohammad Rahimi If the Inter-Domain link is operating in asymmetric TB_LINK_WIDTH_ASYM_TX mode, the rx_lanes_show should return 1 since there is only one receiver and tx_lanes_show should return 3 since there are 3 transmitters. 
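After the swap, the widths reported to userspace map as in the sketch below; the helper is hypothetical and only summarizes the corrected switch statements in the diff that follows.

    /* Hypothetical summary of the corrected mapping (not upstream code). */
    static void xd_link_lanes(enum tb_link_width width, unsigned int *rx,
                              unsigned int *tx)
    {
            switch (width) {
            case TB_LINK_WIDTH_SINGLE:
                    *rx = 1; *tx = 1;
                    break;
            case TB_LINK_WIDTH_DUAL:
                    *rx = 2; *tx = 2;
                    break;
            case TB_LINK_WIDTH_ASYM_TX:     /* 3 transmitters, 1 receiver */
                    *rx = 1; *tx = 3;
                    break;
            case TB_LINK_WIDTH_ASYM_RX:     /* 3 receivers, 1 transmitter */
                    *rx = 3; *tx = 1;
                    break;
            default:
                    *rx = 0; *tx = 0;
                    break;
            }
    }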
Signed-off-by: Mohammad Rahimi Signed-off-by: Mika Westerberg --- drivers/thunderbolt/xdomain.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c index 9495742913d5c..10693a3ac16dd 100644 --- a/drivers/thunderbolt/xdomain.c +++ b/drivers/thunderbolt/xdomain.c @@ -1791,13 +1791,13 @@ static ssize_t rx_lanes_show(struct device *dev, struct device_attribute *attr, switch (xd->link_width) { case TB_LINK_WIDTH_SINGLE: - case TB_LINK_WIDTH_ASYM_RX: + case TB_LINK_WIDTH_ASYM_TX: width = 1; break; case TB_LINK_WIDTH_DUAL: width = 2; break; - case TB_LINK_WIDTH_ASYM_TX: + case TB_LINK_WIDTH_ASYM_RX: width = 3; break; default: @@ -1817,13 +1817,13 @@ static ssize_t tx_lanes_show(struct device *dev, struct device_attribute *attr, switch (xd->link_width) { case TB_LINK_WIDTH_SINGLE: - case TB_LINK_WIDTH_ASYM_TX: + case TB_LINK_WIDTH_ASYM_RX: width = 1; break; case TB_LINK_WIDTH_DUAL: width = 2; break; - case TB_LINK_WIDTH_ASYM_RX: + case TB_LINK_WIDTH_ASYM_TX: width = 3; break; default: From patchwork Tue Jan 23 13:26:17 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: [08/44] thunderbolt: Fix rollback in tb_port_lane_bonding_enable() for lane 1 From: Imre Deak X-Patchwork-Id: 575584 Message-Id: <20240123132653.413364-9-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:17 +0200 From: Mohammad Rahimi If enabling lane bonding on lane 1 of a USB4 port results in an error, the rollback should set TB_LINK_WIDTH_SINGLE for both lanes. Signed-off-by: Mohammad Rahimi Signed-off-by: Mika Westerberg --- drivers/thunderbolt/switch.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index e7bda8729c7e2..bf1daf5165a4e 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -1127,7 +1127,7 @@ int tb_port_lane_bonding_enable(struct tb_port *port) ret = tb_port_set_link_width(port->dual_link_port, TB_LINK_WIDTH_DUAL); if (ret) - goto err_lane0; + goto err_lane1; } /* From patchwork Tue Jan 23 13:26:18 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: [09/44] thunderbolt: Remove usage of the deprecated ida_simple_xx() API From: Imre Deak X-Patchwork-Id: 575597 Message-Id: <20240123132653.413364-10-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:18 +0200 From: Christophe JAILLET ida_alloc() and ida_free() should be preferred to the deprecated ida_simple_get() and ida_simple_remove(). Note that the upper limit of ida_simple_get() is exclusive, but the one of ida_alloc_range()/ida_alloc_max() is inclusive. So a -1 has been added when needed. 
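The conversion rule is mechanical, but the off-by-one matters: ida_simple_get() takes an exclusive upper bound, while ida_alloc_range()/ida_alloc_max() take an inclusive maximum. A minimal sketch with hypothetical example_* wrappers, not code from this patch:

    #include <linux/idr.h>

    static DEFINE_IDA(example_ida);

    static int example_alloc(int min_id, int max_id)
    {
            /* old (exclusive end): ida_simple_get(&example_ida, min_id, max_id + 1, GFP_KERNEL); */
            return ida_alloc_range(&example_ida, min_id, max_id, GFP_KERNEL); /* inclusive max */
    }

    static void example_free(int id)
    {
            /* old: ida_simple_remove(&example_ida, id); */
            ida_free(&example_ida, id);
    }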
Signed-off-by: Christophe JAILLET Signed-off-by: Mika Westerberg --- drivers/thunderbolt/domain.c | 6 +++--- drivers/thunderbolt/nhi.c | 6 +++--- drivers/thunderbolt/nvm.c | 4 ++-- drivers/thunderbolt/switch.c | 6 +++--- drivers/thunderbolt/xdomain.c | 4 ++-- 5 files changed, 13 insertions(+), 13 deletions(-) diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c index df0d845e069ac..ee8a894bd70d1 100644 --- a/drivers/thunderbolt/domain.c +++ b/drivers/thunderbolt/domain.c @@ -321,7 +321,7 @@ static void tb_domain_release(struct device *dev) tb_ctl_free(tb->ctl); destroy_workqueue(tb->wq); - ida_simple_remove(&tb_domain_ida, tb->index); + ida_free(&tb_domain_ida, tb->index); mutex_destroy(&tb->lock); kfree(tb); } @@ -389,7 +389,7 @@ struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize tb->nhi = nhi; mutex_init(&tb->lock); - tb->index = ida_simple_get(&tb_domain_ida, 0, 0, GFP_KERNEL); + tb->index = ida_alloc(&tb_domain_ida, GFP_KERNEL); if (tb->index < 0) goto err_free; @@ -413,7 +413,7 @@ struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize err_destroy_wq: destroy_workqueue(tb->wq); err_remove_ida: - ida_simple_remove(&tb_domain_ida, tb->index); + ida_free(&tb_domain_ida, tb->index); err_free: kfree(tb); diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c index b22023fae60de..e8a4623dc5319 100644 --- a/drivers/thunderbolt/nhi.c +++ b/drivers/thunderbolt/nhi.c @@ -465,7 +465,7 @@ static int ring_request_msix(struct tb_ring *ring, bool no_suspend) if (!nhi->pdev->msix_enabled) return 0; - ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL); + ret = ida_alloc_max(&nhi->msix_ida, MSIX_MAX_VECS - 1, GFP_KERNEL); if (ret < 0) return ret; @@ -485,7 +485,7 @@ static int ring_request_msix(struct tb_ring *ring, bool no_suspend) return 0; err_ida_remove: - ida_simple_remove(&nhi->msix_ida, ring->vector); + ida_free(&nhi->msix_ida, ring->vector); return ret; } @@ -496,7 +496,7 @@ static void ring_release_msix(struct tb_ring *ring) return; free_irq(ring->irq, ring); - ida_simple_remove(&ring->nhi->msix_ida, ring->vector); + ida_free(&ring->nhi->msix_ida, ring->vector); ring->vector = 0; ring->irq = 0; } diff --git a/drivers/thunderbolt/nvm.c b/drivers/thunderbolt/nvm.c index 69fb3b0fa34fa..8901db2de327c 100644 --- a/drivers/thunderbolt/nvm.c +++ b/drivers/thunderbolt/nvm.c @@ -330,7 +330,7 @@ struct tb_nvm *tb_nvm_alloc(struct device *dev) if (!nvm) return ERR_PTR(-ENOMEM); - ret = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL); + ret = ida_alloc(&nvm_ida, GFP_KERNEL); if (ret < 0) { kfree(nvm); return ERR_PTR(ret); @@ -528,7 +528,7 @@ void tb_nvm_free(struct tb_nvm *nvm) nvmem_unregister(nvm->non_active); nvmem_unregister(nvm->active); vfree(nvm->buf); - ida_simple_remove(&nvm_ida, nvm->id); + ida_free(&nvm_ida, nvm->id); } kfree(nvm); } diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index bf1daf5165a4e..bca6f28c553b0 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -778,7 +778,7 @@ static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid, if (max_hopid < 0 || max_hopid > port_max_hopid) max_hopid = port_max_hopid; - return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL); + return ida_alloc_range(ida, min_hopid, max_hopid, GFP_KERNEL); } /** @@ -816,7 +816,7 @@ int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid) */ void tb_port_release_in_hopid(struct tb_port *port, int hopid) { - 
ida_simple_remove(&port->in_hopids, hopid); + ida_free(&port->in_hopids, hopid); } /** @@ -826,7 +826,7 @@ void tb_port_release_in_hopid(struct tb_port *port, int hopid) */ void tb_port_release_out_hopid(struct tb_port *port, int hopid) { - ida_simple_remove(&port->out_hopids, hopid); + ida_free(&port->out_hopids, hopid); } static inline bool tb_switch_is_reachable(const struct tb_switch *parent, diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c index 10693a3ac16dd..b48df88981bda 100644 --- a/drivers/thunderbolt/xdomain.c +++ b/drivers/thunderbolt/xdomain.c @@ -997,7 +997,7 @@ static void tb_service_release(struct device *dev) struct tb_xdomain *xd = tb_service_parent(svc); tb_service_debugfs_remove(svc); - ida_simple_remove(&xd->service_ids, svc->id); + ida_free(&xd->service_ids, svc->id); kfree(svc->key); kfree(svc); } @@ -1099,7 +1099,7 @@ static void enumerate_services(struct tb_xdomain *xd) break; } - id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL); + id = ida_alloc(&xd->service_ids, GFP_KERNEL); if (id < 0) { kfree(svc->key); kfree(svc); From patchwork Tue Jan 23 13:26:19 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: [10/44] thunderbolt: Use DP_LOCAL_CAP for maximum bandwidth calculation From: Imre Deak X-Patchwork-Id: 575585 Message-Id: <20240123132653.413364-11-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:19 +0200 From: Mika Westerberg The DisplayPort IN adapter DP_LOCAL_CAP holds the aggregated capabilities and gets updated after graphics side does the DPRX capabilites read so we should use this to figure out the maximum possible bandwidth for the DisplayPort tunnel. While there make the variable name to match better what it is used for and add kernel-doc comment to the function. Fixes: 2d7e04729798 ("thunderbolt: Add DisplayPort 2.x tunneling support") Signed-off-by: Mika Westerberg --- drivers/thunderbolt/tunnel.c | 57 ++++++++++++++++-------------------- 1 file changed, 25 insertions(+), 32 deletions(-) diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c index 6fffb2c82d3d1..a766ab2970645 100644 --- a/drivers/thunderbolt/tunnel.c +++ b/drivers/thunderbolt/tunnel.c @@ -926,12 +926,18 @@ static int tb_dp_activate(struct tb_tunnel *tunnel, bool active) return 0; } -/* max_bw is rounded up to next granularity */ +/** + * tb_dp_bandwidth_mode_maximum_bandwidth() - Maximum possible bandwidth + * @tunnel: DP tunnel to check + * @max_bw_rounded: Maximum bandwidth in Mb/s rounded up to the next granularity + * + * Returns maximum possible bandwidth for this tunnel in Mb/s. + */ static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel, - int *max_bw) + int *max_bw_rounded) { struct tb_port *in = tunnel->src_port; - int ret, rate, lanes, nrd_bw; + int ret, rate, lanes, max_bw; u32 cap; /* @@ -947,32 +953,18 @@ static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel, return ret; rate = tb_dp_cap_get_rate_ext(cap); - if (tb_dp_is_uhbr_rate(rate)) { - /* - * When UHBR is used there is no reduction in lanes so - * we can use this directly. - */ - lanes = tb_dp_cap_get_lanes(cap); - } else { - /* - * If there is no UHBR supported then check the - * non-reduced rate and lanes. 
- */ - ret = usb4_dp_port_nrd(in, &rate, &lanes); - if (ret) - return ret; - } + lanes = tb_dp_cap_get_lanes(cap); - nrd_bw = tb_dp_bandwidth(rate, lanes); + max_bw = tb_dp_bandwidth(rate, lanes); - if (max_bw) { + if (max_bw_rounded) { ret = usb4_dp_port_granularity(in); if (ret < 0) return ret; - *max_bw = roundup(nrd_bw, ret); + *max_bw_rounded = roundup(max_bw, ret); } - return nrd_bw; + return max_bw; } static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel, @@ -981,7 +973,7 @@ static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel, { struct tb_port *out = tunnel->dst_port; struct tb_port *in = tunnel->src_port; - int ret, allocated_bw, max_bw; + int ret, allocated_bw, max_bw_rounded; if (!usb4_dp_port_bandwidth_mode_enabled(in)) return -EOPNOTSUPP; @@ -995,10 +987,10 @@ static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel, return ret; allocated_bw = ret; - ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw); + ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw_rounded); if (ret < 0) return ret; - if (allocated_bw == max_bw) + if (allocated_bw == max_bw_rounded) allocated_bw = ret; if (tb_port_path_direction_downstream(in, out)) { @@ -1023,17 +1015,18 @@ static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up * Otherwise we read it from the DPRX. */ if (usb4_dp_port_bandwidth_mode_enabled(in) && tunnel->bw_mode) { - int ret, allocated_bw, max_bw; + int ret, allocated_bw, max_bw_rounded; ret = usb4_dp_port_allocated_bandwidth(in); if (ret < 0) return ret; allocated_bw = ret; - ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw); + ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, + &max_bw_rounded); if (ret < 0) return ret; - if (allocated_bw == max_bw) + if (allocated_bw == max_bw_rounded) allocated_bw = ret; if (tb_port_path_direction_downstream(in, out)) { @@ -1055,24 +1048,24 @@ static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up, { struct tb_port *out = tunnel->dst_port; struct tb_port *in = tunnel->src_port; - int max_bw, ret, tmp; + int max_bw_rounded, ret, tmp; if (!usb4_dp_port_bandwidth_mode_enabled(in)) return -EOPNOTSUPP; - ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw); + ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw_rounded); if (ret < 0) return ret; if (tb_port_path_direction_downstream(in, out)) { - tmp = min(*alloc_down, max_bw); + tmp = min(*alloc_down, max_bw_rounded); ret = usb4_dp_port_allocate_bandwidth(in, tmp); if (ret) return ret; *alloc_down = tmp; *alloc_up = 0; } else { - tmp = min(*alloc_up, max_bw); + tmp = min(*alloc_up, max_bw_rounded); ret = usb4_dp_port_allocate_bandwidth(in, tmp); if (ret) return ret; From patchwork Tue Jan 23 13:26:20 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: [11/44] thunderbolt: Re-calculate estimated bandwidth when allocation mode is enabled From: Imre Deak X-Patchwork-Id: 575600 Message-Id: <20240123132653.413364-12-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:20 +0200 From: Mika Westerberg When we program the initial bandwidth estimation the DPTX (graphics driver) has not yet read the capabilities of the monitor so the values used are the highest possible of the involved DisplayPort IN and OUT adapters, not the actual monitor capabilites. 
To allow the graphics more accurate bandwidth estimation re-calculate it once we we receive the bandwidth allocation mode enabled notification. Signed-off-by: Mika Westerberg --- drivers/thunderbolt/tb.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c index 9a261560d0f4c..6679af1efd02c 100644 --- a/drivers/thunderbolt/tb.c +++ b/drivers/thunderbolt/tb.c @@ -2413,10 +2413,19 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work) ret = usb4_dp_port_requested_bandwidth(in); if (ret < 0) { - if (ret == -ENODATA) - tb_port_dbg(in, "no bandwidth request active\n"); - else + if (ret == -ENODATA) { + /* + * There is no request active so this means the + * BW allocation mode was enabled from graphics + * side. At this point we know that the graphics + * driver has read the DRPX capabilities so we + * can offer an better bandwidth estimatation. + */ + tb_port_dbg(in, "DPTX enabled bandwidth allocation mode, updating estimated bandwidth\n"); + tb_recalc_estimated_bandwidth(tb); + } else { tb_port_warn(in, "failed to read requested bandwidth\n"); + } goto put_sw; } requested_bw = ret; From patchwork Tue Jan 23 13:26:21 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: [12/44] thunderbolt: Handle bandwidth allocation mode disable request From: Imre Deak X-Patchwork-Id: 575586 Message-Id: <20240123132653.413364-13-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:21 +0200 From: Mika Westerberg Graphics can disable DisplayPort bandwidth allocation mode as well so if this make sure to reset the tunnel state accordingly. Signed-off-by: Mika Westerberg --- drivers/thunderbolt/tb.c | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c index 6679af1efd02c..c5219cb73f41b 100644 --- a/drivers/thunderbolt/tb.c +++ b/drivers/thunderbolt/tb.c @@ -2406,8 +2406,23 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work) tb_port_dbg(in, "handling bandwidth allocation request\n"); + tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL); + if (!tunnel) { + tb_port_warn(in, "failed to find tunnel\n"); + goto put_sw; + } + if (!usb4_dp_port_bandwidth_mode_enabled(in)) { - tb_port_warn(in, "bandwidth allocation mode not enabled\n"); + if (tunnel->bw_mode) { + /* + * Reset the tunnel back to use the legacy + * allocation. + */ + tunnel->bw_mode = false; + tb_port_dbg(in, "DPTX disabled bandwidth allocation mode\n"); + } else { + tb_port_warn(in, "bandwidth allocation mode not enabled\n"); + } goto put_sw; } @@ -2432,11 +2447,6 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work) tb_port_dbg(in, "requested bandwidth %d Mb/s\n", requested_bw); - tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL); - if (!tunnel) { - tb_port_warn(in, "failed to find tunnel\n"); - goto put_sw; - } out = tunnel->dst_port; From patchwork Tue Jan 23 13:26:22 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: [13/44] thunderbolt: Log an error if DPTX request is not cleared From: Imre Deak X-Patchwork-Id: 575587 Message-Id: <20240123132653.413364-14-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:22 +0200 From: Mika Westerberg This helps debugging issues around DisplayPort bandwidth allocation mode. 
Signed-off-by: Mika Westerberg --- drivers/thunderbolt/usb4.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c index 4b35898aa2166..f4fba144105d0 100644 --- a/drivers/thunderbolt/usb4.c +++ b/drivers/thunderbolt/usb4.c @@ -2858,8 +2858,10 @@ static int usb4_dp_port_wait_and_clear_cm_ack(struct tb_port *port, usleep_range(50, 100); } while (ktime_before(ktime_get(), end)); - if (val & ADP_DP_CS_8_DR) + if (val & ADP_DP_CS_8_DR) { + tb_port_warn(port, "timeout waiting for DPTX request to clear\n"); return -ETIMEDOUT; + } ret = tb_port_read(port, &val, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_2, 1); From patchwork Tue Jan 23 13:26:23 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: [14/44] thunderbolt: Fail the failed bandwidth request properly From: Imre Deak X-Patchwork-Id: 575588 Message-Id: <20240123132653.413364-15-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:23 +0200 From: Mika Westerberg The USB4 spec says that if the Connection Manager writes Allocated_BW taht is smaller than Requested_BW, the DisplayPort IN adapter signals this failure back to the DPTX (graphics driver). Implement this by rewriting the same allocated bandwidth values back. Signed-off-by: Mika Westerberg --- drivers/thunderbolt/tb.c | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c index c5219cb73f41b..349d93bac8f88 100644 --- a/drivers/thunderbolt/tb.c +++ b/drivers/thunderbolt/tb.c @@ -2270,11 +2270,11 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up, */ ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down); if (ret) - return ret; + goto fail; ret = usb4_dp_port_granularity(in); if (ret < 0) - return ret; + goto fail; granularity = ret; max_up_rounded = roundup(max_up, granularity); @@ -2304,7 +2304,8 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up, "bandwidth request too high (%d/%d Mb/s > %d/%d Mb/s)\n", requested_up_corrected, requested_down_corrected, max_up_rounded, max_down_rounded); - return -ENOBUFS; + ret = -ENOBUFS; + goto fail; } if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) || @@ -2332,7 +2333,7 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up, */ ret = tb_release_unused_usb3_bandwidth(tb, in, out); if (ret) - return ret; + goto fail; /* * Then go over all tunnels that cross the same USB4 ports (they @@ -2357,7 +2358,7 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up, *requested_down); if (ret) { tb_configure_sym(tb, in, out, 0, 0, true); - return ret; + goto fail; } ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up, @@ -2372,6 +2373,18 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up, reclaim: tb_reclaim_usb3_bandwidth(tb, in, out); +fail: + if (ret) { + /* + * Write back the same allocated (so no change), this + * makes the DPTX request fail on graphics side. 
+ */ + tb_tunnel_dbg(tunnel, + "failing the request by rewriting allocated %d/%d Mb/s\n", + allocated_up, allocated_down); + tb_tunnel_alloc_bandwidth(tunnel, &allocated_up, &allocated_down); + } + return ret; } From patchwork Tue Jan 23 13:26:24 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: [15/44] thunderbolt: Introduce tb_tunnel_direction_downstream() From: Imre Deak X-Patchwork-Id: 575604 Message-Id: <20240123132653.413364-16-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:24 +0200 From: Mika Westerberg This helper takes tunnel as parameter. Convert existing code to call this where possible. No functional changes. Signed-off-by: Mika Westerberg --- drivers/thunderbolt/tb.c | 9 +++------ drivers/thunderbolt/tunnel.c | 23 +++++++++-------------- drivers/thunderbolt/tunnel.h | 6 ++++++ 3 files changed, 18 insertions(+), 20 deletions(-) diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c index 349d93bac8f88..747cc964c24fe 100644 --- a/drivers/thunderbolt/tb.c +++ b/drivers/thunderbolt/tb.c @@ -1666,7 +1666,7 @@ tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group) "re-calculated estimated bandwidth %u/%u Mb/s\n", estimated_up, estimated_down); - if (tb_port_path_direction_downstream(in, out)) + if (tb_tunnel_direction_downstream(tunnel)) estimated_bw = estimated_down; else estimated_bw = estimated_up; @@ -2392,11 +2392,11 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work) { struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work); int requested_bw, requested_up, requested_down, ret; - struct tb_port *in, *out; struct tb_tunnel *tunnel; struct tb *tb = ev->tb; struct tb_cm *tcm = tb_priv(tb); struct tb_switch *sw; + struct tb_port *in; pm_runtime_get_sync(&tb->dev); @@ -2460,10 +2460,7 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work) tb_port_dbg(in, "requested bandwidth %d Mb/s\n", requested_bw); - - out = tunnel->dst_port; - - if (tb_port_path_direction_downstream(in, out)) { + if (tb_tunnel_direction_downstream(tunnel)) { requested_up = -1; requested_down = requested_bw; } else { diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c index a766ab2970645..e02b34654d296 100644 --- a/drivers/thunderbolt/tunnel.c +++ b/drivers/thunderbolt/tunnel.c @@ -706,7 +706,7 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel) "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n", out_rate, out_lanes, bw); - if (tb_port_path_direction_downstream(in, out)) + if (tb_tunnel_direction_downstream(tunnel)) max_bw = tunnel->max_down; else max_bw = tunnel->max_up; @@ -831,7 +831,7 @@ static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel) * max_up/down fields. For discovery we just read what the * estimation was set to. 
*/ - if (tb_port_path_direction_downstream(in, out)) + if (tb_tunnel_direction_downstream(tunnel)) estimated_bw = tunnel->max_down; else estimated_bw = tunnel->max_up; @@ -971,7 +971,6 @@ static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up, int *consumed_down) { - struct tb_port *out = tunnel->dst_port; struct tb_port *in = tunnel->src_port; int ret, allocated_bw, max_bw_rounded; @@ -993,7 +992,7 @@ static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel, if (allocated_bw == max_bw_rounded) allocated_bw = ret; - if (tb_port_path_direction_downstream(in, out)) { + if (tb_tunnel_direction_downstream(tunnel)) { *consumed_up = 0; *consumed_down = allocated_bw; } else { @@ -1007,7 +1006,6 @@ static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel, static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up, int *allocated_down) { - struct tb_port *out = tunnel->dst_port; struct tb_port *in = tunnel->src_port; /* @@ -1029,7 +1027,7 @@ static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up if (allocated_bw == max_bw_rounded) allocated_bw = ret; - if (tb_port_path_direction_downstream(in, out)) { + if (tb_tunnel_direction_downstream(tunnel)) { *allocated_up = 0; *allocated_down = allocated_bw; } else { @@ -1046,7 +1044,6 @@ static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up, int *alloc_down) { - struct tb_port *out = tunnel->dst_port; struct tb_port *in = tunnel->src_port; int max_bw_rounded, ret, tmp; @@ -1057,7 +1054,7 @@ static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up, if (ret < 0) return ret; - if (tb_port_path_direction_downstream(in, out)) { + if (tb_tunnel_direction_downstream(tunnel)) { tmp = min(*alloc_down, max_bw_rounded); ret = usb4_dp_port_allocate_bandwidth(in, tmp); if (ret) @@ -1143,17 +1140,16 @@ static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate, static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up, int *max_down) { - struct tb_port *in = tunnel->src_port; int ret; - if (!usb4_dp_port_bandwidth_mode_enabled(in)) + if (!usb4_dp_port_bandwidth_mode_enabled(tunnel->src_port)) return -EOPNOTSUPP; ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, NULL); if (ret < 0) return ret; - if (tb_port_path_direction_downstream(in, tunnel->dst_port)) { + if (tb_tunnel_direction_downstream(tunnel)) { *max_up = 0; *max_down = ret; } else { @@ -1167,8 +1163,7 @@ static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up, static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up, int *consumed_down) { - struct tb_port *in = tunnel->src_port; - const struct tb_switch *sw = in->sw; + const struct tb_switch *sw = tunnel->src_port->sw; u32 rate = 0, lanes = 0; int ret; @@ -1214,7 +1209,7 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up, return 0; } - if (tb_port_path_direction_downstream(in, tunnel->dst_port)) { + if (tb_tunnel_direction_downstream(tunnel)) { *consumed_up = 0; *consumed_down = tb_dp_bandwidth(rate, lanes); } else { diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h index b4cff5482112d..1a27ccd08b861 100644 --- a/drivers/thunderbolt/tunnel.h +++ b/drivers/thunderbolt/tunnel.h @@ -139,6 +139,12 @@ static inline bool tb_tunnel_is_usb3(const struct tb_tunnel *tunnel) return tunnel->type == 
TB_TUNNEL_USB3; } +static inline bool tb_tunnel_direction_downstream(const struct tb_tunnel *tunnel) +{ + return tb_port_path_direction_downstream(tunnel->src_port, + tunnel->dst_port); +} + const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel); #define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \ From patchwork Tue Jan 23 13:26:25 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: [16/44] thunderbolt: Reserve released DisplayPort bandwidth for a group for 10 seconds From: Imre Deak X-Patchwork-Id: 575605 Message-Id: <20240123132653.413364-17-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:25 +0200 From: Mika Westerberg The USB4 spec says that the Connection Manager should reserve the bandwidth that is released in the same group for 10 seconds before it can be shared with other groups. Add support for this. We also delay the symmetric transition by that same 10 seconds to avoid any unnecessary transitions (i.e if the released bandwidth is used by another DisplayPort tunnel in the same group the link can stay asymmetric the whole time). Signed-off-by: Mika Westerberg --- drivers/thunderbolt/tb.c | 347 ++++++++++++++++++++++++++------------- drivers/thunderbolt/tb.h | 7 + 2 files changed, 243 insertions(+), 111 deletions(-) diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c index 747cc964c24fe..59733521e7b01 100644 --- a/drivers/thunderbolt/tb.c +++ b/drivers/thunderbolt/tb.c @@ -17,6 +17,7 @@ #include "tunnel.h" #define TB_TIMEOUT 100 /* ms */ +#define TB_RELEASE_BW_TIMEOUT 10000 /* ms */ /* * Minimum bandwidth (in Mb/s) that is needed in the single transmitter/receiver @@ -75,6 +76,134 @@ struct tb_hotplug_event { bool unplug; }; +static void tb_handle_hotplug(struct work_struct *work); +static int tb_configure_sym(struct tb *tb, struct tb_port *src_port, + struct tb_port *dst_port, bool keep_asym); + +/** + * tb_find_unused_port() - return the first inactive port on @sw + * @sw: Switch to find the port on + * @type: Port type to look for + */ +static struct tb_port *tb_find_unused_port(struct tb_switch *sw, + enum tb_port_type type) +{ + struct tb_port *port; + + tb_switch_for_each_port(sw, port) { + if (tb_is_upstream_port(port)) + continue; + if (port->config.type != type) + continue; + if (!port->cap_adap) + continue; + if (tb_port_is_enabled(port)) + continue; + return port; + } + return NULL; +} + +static struct tb_port *tb_find_usb3_down(struct tb_switch *sw, + const struct tb_port *port) +{ + struct tb_port *down; + + down = usb4_switch_map_usb3_down(sw, port); + if (down && !tb_usb3_port_is_enabled(down)) + return down; + return NULL; +} + +static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type, + struct tb_port *src_port, + struct tb_port *dst_port) +{ + struct tb_cm *tcm = tb_priv(tb); + struct tb_tunnel *tunnel; + + list_for_each_entry(tunnel, &tcm->tunnel_list, list) { + if (tunnel->type == type && + ((src_port && src_port == tunnel->src_port) || + (dst_port && dst_port == tunnel->dst_port))) { + return tunnel; + } + } + + return NULL; +} + +static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb, + struct tb_port *src_port, + struct tb_port *dst_port) +{ + struct tb_port *port, *usb3_down; + struct tb_switch *sw; + + /* Pick the router that is deepest in the topology */ + if (tb_port_path_direction_downstream(src_port, dst_port)) + sw = dst_port->sw; + else + sw = src_port->sw; + + /* Can't be the host router */ + if 
(sw == tb->root_switch) + return NULL; + + /* Find the downstream USB4 port that leads to this router */ + port = tb_port_at(tb_route(sw), tb->root_switch); + /* Find the corresponding host router USB3 downstream port */ + usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port); + if (!usb3_down) + return NULL; + + return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL); +} + +static void __release_group_bandwidth(struct tb_bandwidth_group *group) +{ + if (group->reserved) { + tb_dbg(group->tb, "group %d released total %d Mb/s\n", group->index, + group->reserved); + group->reserved = 0; + } +} + +static void __configure_group_sym(struct tb_bandwidth_group *group) +{ + struct tb_tunnel *tunnel; + struct tb_port *in; + + if (list_empty(&group->ports)) + return; + + /* + * All the tunnels in the group go through the same USB4 links + * so we find the first one here and pass the IN and OUT + * adapters to tb_configure_sym() which now transitions the + * links back to symmetric if bandwidth requirement < asym_threshold. + * + * We do this here to avoid unnecessary transitions (for example + * if the graphics released bandwidth for other tunnel in the + * same group). + */ + in = list_first_entry(&group->ports, struct tb_port, group_list); + tunnel = tb_find_tunnel(group->tb, TB_TUNNEL_DP, in, NULL); + if (tunnel) + tb_configure_sym(group->tb, in, tunnel->dst_port, true); +} + +static void tb_bandwidth_group_release_work(struct work_struct *work) +{ + struct tb_bandwidth_group *group = + container_of(work, typeof(*group), release_work.work); + + mutex_lock(&group->tb->lock); + __release_group_bandwidth(group); + __configure_group_sym(group); + mutex_unlock(&group->tb->lock); +} + static void tb_init_bandwidth_groups(struct tb_cm *tcm) { int i; @@ -85,6 +214,8 @@ static void tb_init_bandwidth_groups(struct tb_cm *tcm) group->tb = tcm_to_tb(tcm); group->index = i + 1; INIT_LIST_HEAD(&group->ports); + INIT_DELAYED_WORK(&group->release_work, + tb_bandwidth_group_release_work); } } @@ -178,11 +309,15 @@ static void tb_detach_bandwidth_group(struct tb_port *in) list_del_init(&in->group_list); tb_port_dbg(in, "detached from bandwidth group %d\n", group->index); + + /* No more tunnels so release the reserved bandwidth if any */ + if (list_empty(&group->ports)) { + if (cancel_delayed_work_sync(&group->release_work)) + __release_group_bandwidth(group); + } } } -static void tb_handle_hotplug(struct work_struct *work); - static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug) { struct tb_hotplug_event *ev; @@ -541,86 +676,6 @@ static void tb_scan_xdomain(struct tb_port *port) } } -/** - * tb_find_unused_port() - return the first inactive port on @sw - * @sw: Switch to find the port on - * @type: Port type to look for - */ -static struct tb_port *tb_find_unused_port(struct tb_switch *sw, - enum tb_port_type type) -{ - struct tb_port *port; - - tb_switch_for_each_port(sw, port) { - if (tb_is_upstream_port(port)) - continue; - if (port->config.type != type) - continue; - if (!port->cap_adap) - continue; - if (tb_port_is_enabled(port)) - continue; - return port; - } - return NULL; -} - -static struct tb_port *tb_find_usb3_down(struct tb_switch *sw, - const struct tb_port *port) -{ - struct tb_port *down; - - down = usb4_switch_map_usb3_down(sw, port); - if (down && !tb_usb3_port_is_enabled(down)) - return down; - return NULL; -} - -static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type, - struct tb_port *src_port, - struct tb_port *dst_port) -{ - struct 
tb_cm *tcm = tb_priv(tb); - struct tb_tunnel *tunnel; - - list_for_each_entry(tunnel, &tcm->tunnel_list, list) { - if (tunnel->type == type && - ((src_port && src_port == tunnel->src_port) || - (dst_port && dst_port == tunnel->dst_port))) { - return tunnel; - } - } - - return NULL; -} - -static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb, - struct tb_port *src_port, - struct tb_port *dst_port) -{ - struct tb_port *port, *usb3_down; - struct tb_switch *sw; - - /* Pick the router that is deepest in the topology */ - if (tb_port_path_direction_downstream(src_port, dst_port)) - sw = dst_port->sw; - else - sw = src_port->sw; - - /* Can't be the host router */ - if (sw == tb->root_switch) - return NULL; - - /* Find the downstream USB4 port that leads to this router */ - port = tb_port_at(tb_route(sw), tb->root_switch); - /* Find the corresponding host router USB3 downstream port */ - usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port); - if (!usb3_down) - return NULL; - - return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL); -} - /** * tb_consumed_usb3_pcie_bandwidth() - Consumed USB3/PCIe bandwidth over a single link * @tb: Domain structure @@ -681,6 +736,10 @@ static int tb_consumed_usb3_pcie_bandwidth(struct tb *tb, * Calculates consumed DP bandwidth at @port between path from @src_port * to @dst_port. Does not take tunnel starting from @src_port and ending * from @src_port into account. + * + * If there is bandwidth reserved for any of the groups between + * @src_port and @dst_port (but not yet used) that is also taken into + * account in the returned consumed bandwidth. */ static int tb_consumed_dp_bandwidth(struct tb *tb, struct tb_port *src_port, @@ -689,9 +748,11 @@ static int tb_consumed_dp_bandwidth(struct tb *tb, int *consumed_up, int *consumed_down) { + int group_reserved[MAX_GROUPS] = {}; struct tb_cm *tcm = tb_priv(tb); struct tb_tunnel *tunnel; - int ret; + bool downstream; + int i, ret; *consumed_up = *consumed_down = 0; @@ -700,6 +761,7 @@ static int tb_consumed_dp_bandwidth(struct tb *tb, * their consumed bandwidth from the available. */ list_for_each_entry(tunnel, &tcm->tunnel_list, list) { + const struct tb_bandwidth_group *group; int dp_consumed_up, dp_consumed_down; if (tb_tunnel_is_invalid(tunnel)) @@ -711,6 +773,15 @@ static int tb_consumed_dp_bandwidth(struct tb *tb, if (!tb_tunnel_port_on_path(tunnel, port)) continue; + /* + * Calculate what is reserved for groups crossing the + * same ports only once (as that is reserved for all the + * tunnels in the group). 
+ */ + group = tunnel->src_port->group; + if (group && group->reserved && !group_reserved[group->index]) + group_reserved[group->index] = group->reserved; + /* * Ignore the DP tunnel between src_port and dst_port * because it is the same tunnel and we may be @@ -729,6 +800,14 @@ static int tb_consumed_dp_bandwidth(struct tb *tb, *consumed_down += dp_consumed_down; } + downstream = tb_port_path_direction_downstream(src_port, dst_port); + for (i = 0; i < ARRAY_SIZE(group_reserved); i++) { + if (downstream) + *consumed_down += group_reserved[i]; + else + *consumed_up += group_reserved[i]; + } + return 0; } @@ -1181,8 +1260,6 @@ static int tb_configure_asym(struct tb *tb, struct tb_port *src_port, * @tb: Domain structure * @src_port: Source adapter to start the transition * @dst_port: Destination adapter - * @requested_up: New lower bandwidth request upstream (Mb/s) - * @requested_down: New lower bandwidth request downstream (Mb/s) * @keep_asym: Keep asymmetric link if preferred * * Goes over each link from @src_port to @dst_port and tries to @@ -1190,8 +1267,7 @@ static int tb_configure_asym(struct tb *tb, struct tb_port *src_port, * allows and link asymmetric preference is ignored (if @keep_asym is %false). */ static int tb_configure_sym(struct tb *tb, struct tb_port *src_port, - struct tb_port *dst_port, int requested_up, - int requested_down, bool keep_asym) + struct tb_port *dst_port, bool keep_asym) { bool clx = false, clx_disabled = false, downstream; struct tb_switch *sw; @@ -1230,10 +1306,10 @@ static int tb_configure_sym(struct tb *tb, struct tb_port *src_port, * guard band 10%) as the link was configured asymmetric * already. */ - if (consumed_down + requested_down >= asym_threshold) + if (consumed_down >= asym_threshold) continue; } else { - if (consumed_up + requested_up >= asym_threshold) + if (consumed_up >= asym_threshold) continue; } @@ -1306,7 +1382,7 @@ static void tb_configure_link(struct tb_port *down, struct tb_port *up, struct tb_port *host_port; host_port = tb_port_at(tb_route(sw), tb->root_switch); - tb_configure_sym(tb, host_port, up, 0, 0, false); + tb_configure_sym(tb, host_port, up, false); } /* Set the link configured */ @@ -1491,7 +1567,7 @@ static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel) * If bandwidth on a link is < asym_threshold * transition the link to symmetric. */ - tb_configure_sym(tb, src_port, dst_port, 0, 0, true); + tb_configure_sym(tb, src_port, dst_port, true); /* Now we can allow the domain to runtime suspend again */ pm_runtime_mark_last_busy(&dst_port->sw->dev); pm_runtime_put_autosuspend(&dst_port->sw->dev); @@ -1662,16 +1738,22 @@ tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group) * - available bandwidth along the path * - bandwidth allocated for USB 3.x but not used. */ - tb_tunnel_dbg(tunnel, - "re-calculated estimated bandwidth %u/%u Mb/s\n", - estimated_up, estimated_down); - if (tb_tunnel_direction_downstream(tunnel)) estimated_bw = estimated_down; else estimated_bw = estimated_up; - if (usb4_dp_port_set_estimated_bandwidth(in, estimated_bw)) + /* + * If there is reserved bandwidth for the group that is + * not yet released we report that too. 
+ */ + tb_tunnel_dbg(tunnel, + "re-calculated estimated bandwidth %u (+ %u reserved) = %u Mb/s\n", + estimated_bw, group->reserved, + estimated_bw + group->reserved); + + if (usb4_dp_port_set_estimated_bandwidth(in, + estimated_bw + group->reserved)) tb_tunnel_warn(tunnel, "failed to update estimated bandwidth\n"); } @@ -2243,8 +2325,10 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up, int allocated_up, allocated_down, available_up, available_down, ret; int requested_up_corrected, requested_down_corrected, granularity; int max_up, max_down, max_up_rounded, max_down_rounded; + struct tb_bandwidth_group *group; struct tb *tb = tunnel->tb; struct tb_port *in, *out; + bool downstream; ret = tb_tunnel_allocated_bandwidth(tunnel, &allocated_up, &allocated_down); if (ret) @@ -2308,21 +2392,44 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up, goto fail; } + downstream = tb_tunnel_direction_downstream(tunnel); + group = in->group; + if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) || (*requested_down >= 0 && requested_down_corrected <= allocated_down)) { - /* - * If bandwidth on a link is < asym_threshold transition - * the link to symmetric. - */ - tb_configure_sym(tb, in, out, *requested_up, *requested_down, true); - /* - * If requested bandwidth is less or equal than what is - * currently allocated to that tunnel we simply change - * the reservation of the tunnel. Since all the tunnels - * going out from the same USB4 port are in the same - * group the released bandwidth will be taken into - * account for the other tunnels automatically below. - */ + if (tunnel->bw_mode) { + int reserved; + /* + * If requested bandwidth is less or equal than + * what is currently allocated to that tunnel we + * simply change the reservation of the tunnel + * and add the released bandwidth for the group + * for the next 10s. Then we release it for + * others to use. + */ + if (downstream) + reserved = allocated_down - *requested_down; + else + reserved = allocated_up - *requested_up; + + if (reserved > 0) { + group->reserved += reserved; + tb_dbg(tb, "group %d reserved %d total %d Mb/s\n", + group->index, reserved, group->reserved); + + /* + * If it was not already pending, + * schedule release now. If it is then + * postpone it for the next 10s (unless + * it is already running in which case + * the 10s already expired and we should + * give the reserved back to others). + */ + mod_delayed_work(system_wq, &group->release_work, + msecs_to_jiffies(TB_RELEASE_BW_TIMEOUT)); + } + } + return tb_tunnel_alloc_bandwidth(tunnel, requested_up, requested_down); } @@ -2345,6 +2452,24 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up, if (ret) goto reclaim; + /* + * Add the reserved if any and clean it up. After this the all + * what is left will be given to anyone asking. 
+ */ + if (group->reserved) { + tb_dbg(tb, "group %d using reserved %d Mb/s\n", group->index, + group->reserved); + + if (downstream) + available_down += group->reserved; + else + available_up += group->reserved; + + group->reserved = 0; + } + /* We already released the reservation so no need to clean up */ + cancel_delayed_work(&group->release_work); + tb_tunnel_dbg(tunnel, "bandwidth available for allocation %d/%d Mb/s\n", available_up, available_down); @@ -2357,7 +2482,7 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up, ret = tb_configure_asym(tb, in, out, *requested_up, *requested_down); if (ret) { - tb_configure_sym(tb, in, out, 0, 0, true); + tb_configure_sym(tb, in, out, true); goto fail; } @@ -2365,7 +2490,7 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up, requested_down); if (ret) { tb_tunnel_warn(tunnel, "failed to allocate bandwidth\n"); - tb_configure_sym(tb, in, out, 0, 0, true); + tb_configure_sym(tb, in, out, true); } } else { ret = -ENOBUFS; diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h index d0dfbf040356d..63d401c2ae60c 100644 --- a/drivers/thunderbolt/tb.h +++ b/drivers/thunderbolt/tb.h @@ -217,6 +217,11 @@ struct tb_switch { * @tb: Pointer to the domain the group belongs to * @index: Index of the group (aka Group_ID). Valid values %1-%7 * @ports: DP IN adapters belonging to this group are linked here + * @reserved: Bandwidth released by one tunnel in the group, available + * to others. This is reported as part of estimated_bw for + * the group. + * @release_work: Worker to release the @reserved if it is not used by + * any of the tunnels. * * Any tunnel that requires isochronous bandwidth (that's DP for now) is * attached to a bandwidth group. All tunnels going through the same @@ -227,6 +232,8 @@ struct tb_bandwidth_group { struct tb *tb; int index; struct list_head ports; + int reserved; + struct delayed_work release_work; }; /** From patchwork Tue Jan 23 13:26:26 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: [17/44] thunderbolt: No need to transition the link to symmetric during suspend From: Imre Deak X-Patchwork-Id: 575589 Message-Id: <20240123132653.413364-18-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:26 +0200 From: Mika Westerberg There is no point transitioning the link back to symmetric when suspending if it was asymmetric. The USB4 spec says the link should come up as it was configured prior entering sleep so this saves unnecessary link transitions. Signed-off-by: Mika Westerberg --- drivers/thunderbolt/tb.c | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c index 59733521e7b01..91e3f950a66ae 100644 --- a/drivers/thunderbolt/tb.c +++ b/drivers/thunderbolt/tb.c @@ -1540,7 +1540,8 @@ static void tb_scan_port(struct tb_port *port) } } -static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel) +static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel, + bool suspending) { struct tb_port *src_port, *dst_port; struct tb *tb; @@ -1564,10 +1565,12 @@ static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel) */ tb_switch_dealloc_dp_resource(src_port->sw, src_port); /* - * If bandwidth on a link is < asym_threshold - * transition the link to symmetric. + * If bandwidth on a link is < asym_threshold transition + * the link to symmetric. 
But only if we are not + * entering suspend. */ - tb_configure_sym(tb, src_port, dst_port, true); + if (!suspending) + tb_configure_sym(tb, src_port, dst_port, true); /* Now we can allow the domain to runtime suspend again */ pm_runtime_mark_last_busy(&dst_port->sw->dev); pm_runtime_put_autosuspend(&dst_port->sw->dev); @@ -1601,7 +1604,7 @@ static void tb_free_invalid_tunnels(struct tb *tb) list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { if (tb_tunnel_is_invalid(tunnel)) - tb_deactivate_and_free_tunnel(tunnel); + tb_deactivate_and_free_tunnel(tunnel, false); } } @@ -1985,7 +1988,7 @@ static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port) } tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out); - tb_deactivate_and_free_tunnel(tunnel); + tb_deactivate_and_free_tunnel(tunnel, false); list_del_init(&port->list); /* @@ -2028,7 +2031,7 @@ static void tb_disconnect_and_release_dp(struct tb *tb) */ list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) { if (tb_tunnel_is_dp(tunnel)) - tb_deactivate_and_free_tunnel(tunnel); + tb_deactivate_and_free_tunnel(tunnel, true); } while (!list_empty(&tcm->dp_resources)) { @@ -2175,7 +2178,7 @@ static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring, receive_path, receive_ring)) - tb_deactivate_and_free_tunnel(tunnel); + tb_deactivate_and_free_tunnel(tunnel, false); } /* From patchwork Tue Jan 23 13:26:27 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: [18/44] thunderbolt: Make XDomain lane bonding comply with the USB4 v2 spec From: Imre Deak X-Patchwork-Id: 575591 Message-Id: <20240123132653.413364-19-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:27 +0200 From: Mika Westerberg The USB4 v2 Inter-Domain spec "unified" the lane bonding flow so that when the other end (with higher UUID) is not yet set the target link width accordingly it is expected to reply with ERROR_NOT_READY. Implement this for Linux. 
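As a rough illustration of that rule (not the driver change itself, which is in the xdomain.c hunk below; all identifiers here are hypothetical stand-ins), the end that owns the higher UUID keeps answering ERROR_NOT_READY until its target link width register matches what the peer negotiated:

    /*
     * Minimal sketch of the USB4 v2 Inter-Domain reply rule. The names
     * are made up for the example; see the diff below for the real code.
     */
    enum xdp_reply { XDP_REPLY_STATUS, XDP_REPLY_ERROR_NOT_READY };

    static enum xdp_reply link_state_status_reply(bool uuid_is_higher,
                                                  unsigned int negotiated_tlw,
                                                  unsigned int current_tlw)
    {
            /*
             * The higher-UUID end must hold off the requester until it
             * has programmed the agreed target link width.
             */
            if (uuid_is_higher && negotiated_tlw && negotiated_tlw != current_tlw)
                    return XDP_REPLY_ERROR_NOT_READY;

            /* Otherwise report the supported and target widths/speeds. */
            return XDP_REPLY_STATUS;
    }
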
Signed-off-by: Mika Westerberg --- drivers/thunderbolt/xdomain.c | 65 +++++++++++++++++++++++++---------- 1 file changed, 47 insertions(+), 18 deletions(-) diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c index b48df88981bda..78399dd4cecd3 100644 --- a/drivers/thunderbolt/xdomain.c +++ b/drivers/thunderbolt/xdomain.c @@ -535,29 +535,19 @@ static int tb_xdp_link_state_status_request(struct tb_ctl *ctl, u64 route, } static int tb_xdp_link_state_status_response(struct tb *tb, struct tb_ctl *ctl, - struct tb_xdomain *xd, u8 sequence) + struct tb_xdomain *xd, u8 sequence, + u8 slw, u8 sls, u8 tls, u8 tlw) { struct tb_xdp_link_state_status_response res; - struct tb_port *port = tb_xdomain_downstream_port(xd); - u32 val[2]; - int ret; memset(&res, 0, sizeof(res)); tb_xdp_fill_header(&res.hdr, xd->route, sequence, LINK_STATE_STATUS_RESPONSE, sizeof(res)); - ret = tb_port_read(port, val, TB_CFG_PORT, - port->cap_phy + LANE_ADP_CS_0, ARRAY_SIZE(val)); - if (ret) - return ret; - - res.slw = (val[0] & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >> - LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT; - res.sls = (val[0] & LANE_ADP_CS_0_SUPPORTED_SPEED_MASK) >> - LANE_ADP_CS_0_SUPPORTED_SPEED_SHIFT; - res.tls = val[1] & LANE_ADP_CS_1_TARGET_SPEED_MASK; - res.tlw = (val[1] & LANE_ADP_CS_1_TARGET_WIDTH_MASK) >> - LANE_ADP_CS_1_TARGET_WIDTH_SHIFT; + res.slw = slw; + res.sls = sls; + res.tls = tls; + res.tlw = tlw; return __tb_xdomain_response(ctl, &res, sizeof(res), TB_CFG_PKG_XDOMAIN_RESP); @@ -802,8 +792,47 @@ static void tb_xdp_handle_request(struct work_struct *work) route); if (xd) { - ret = tb_xdp_link_state_status_response(tb, ctl, xd, - sequence); + struct tb_port *port = tb_xdomain_downstream_port(xd); + u8 slw, sls, tls, tlw; + u32 val[2]; + + /* + * Read the adapter supported and target widths + * and speeds. + */ + ret = tb_port_read(port, val, TB_CFG_PORT, + port->cap_phy + LANE_ADP_CS_0, + ARRAY_SIZE(val)); + if (ret) + break; + + slw = (val[0] & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >> + LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT; + sls = (val[0] & LANE_ADP_CS_0_SUPPORTED_SPEED_MASK) >> + LANE_ADP_CS_0_SUPPORTED_SPEED_SHIFT; + tls = val[1] & LANE_ADP_CS_1_TARGET_SPEED_MASK; + tlw = (val[1] & LANE_ADP_CS_1_TARGET_WIDTH_MASK) >> + LANE_ADP_CS_1_TARGET_WIDTH_SHIFT; + + /* + * When we have higher UUID, we are supposed to + * return ERROR_NOT_READY if the tlw is not yet + * set according to the Inter-Domain spec for + * USB4 v2. + */ + if (xd->state == XDOMAIN_STATE_BONDING_UUID_HIGH && + xd->target_link_width && + xd->target_link_width != tlw) { + tb_dbg(tb, "%llx: target link width not yet set %#x != %#x\n", + route, tlw, xd->target_link_width); + tb_xdp_error_response(ctl, route, sequence, + ERROR_NOT_READY); + } else { + tb_dbg(tb, "%llx: replying with target link width set to %#x\n", + route, tlw); + ret = tb_xdp_link_state_status_response(tb, ctl, + xd, sequence, slw, sls, tls, tlw); + } } else { tb_xdp_error_response(ctl, route, sequence, ERROR_NOT_READY); From patchwork Tue Jan 23 13:26:28 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: [19/44] thunderbolt: Split out margining from USB4 port From: Imre Deak X-Patchwork-Id: 575590 Message-Id: <20240123132653.413364-20-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:28 +0200 From: Mika Westerberg We are going to expand lane margining support for retimers too so split out the generic margining functionality out of being specific to USB4 ports. 
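The shape of the refactor, condensed from the diff below (one helper shown, other fields trimmed): the debugfs helpers stop reaching through struct usb4_port and instead take the margining state directly, which now carries a back-pointer to the USB4 port it runs through, so the same state can later be attached to a retimer as well.

    /* Before: every helper needed the USB4 port to find the state. */
    static bool supports_software(const struct usb4_port *usb4)
    {
            return usb4->margining->caps[0] & USB4_MARGIN_CAP_0_MODES_SW;
    }

    /* After: the state is self-contained. */
    struct tb_margining {
            struct tb_port *port;   /* port the margining operations are run through */
            u32 caps[2];
            /* ... */
    };

    static bool supports_software(const struct tb_margining *margining)
    {
            return margining->caps[0] & USB4_MARGIN_CAP_0_MODES_SW;
    }
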
Signed-off-by: Mika Westerberg --- drivers/thunderbolt/debugfs.c | 232 ++++++++++++++++------------------ 1 file changed, 111 insertions(+), 121 deletions(-) diff --git a/drivers/thunderbolt/debugfs.c b/drivers/thunderbolt/debugfs.c index e324cd8997193..4f50afd2516f7 100644 --- a/drivers/thunderbolt/debugfs.c +++ b/drivers/thunderbolt/debugfs.c @@ -194,6 +194,7 @@ static ssize_t switch_regs_write(struct file *file, const char __user *user_buf, #if IS_ENABLED(CONFIG_USB4_DEBUGFS_MARGINING) /** * struct tb_margining - Lane margining support + * @port: USB4 port through which the margining operations are run * @caps: Port lane margining capabilities * @results: Last lane margining results * @lanes: %0, %1 or %7 (all) @@ -210,6 +211,7 @@ static ssize_t switch_regs_write(struct file *file, const char __user *user_buf, * right/high */ struct tb_margining { + struct tb_port *port; u32 caps[2]; u32 results[2]; unsigned int lanes; @@ -225,36 +227,38 @@ struct tb_margining { bool right_high; }; -static bool supports_software(const struct usb4_port *usb4) +static bool supports_software(const struct tb_margining *margining) { - return usb4->margining->caps[0] & USB4_MARGIN_CAP_0_MODES_SW; + return margining->caps[0] & USB4_MARGIN_CAP_0_MODES_SW; } -static bool supports_hardware(const struct usb4_port *usb4) +static bool supports_hardware(const struct tb_margining *margining) { - return usb4->margining->caps[0] & USB4_MARGIN_CAP_0_MODES_HW; + return margining->caps[0] & USB4_MARGIN_CAP_0_MODES_HW; } -static bool both_lanes(const struct usb4_port *usb4) +static bool both_lanes(const struct tb_margining *margining) { - return usb4->margining->caps[0] & USB4_MARGIN_CAP_0_2_LANES; + return margining->caps[0] & USB4_MARGIN_CAP_0_2_LANES; } -static unsigned int independent_voltage_margins(const struct usb4_port *usb4) +static unsigned int +independent_voltage_margins(const struct tb_margining *margining) { - return (usb4->margining->caps[0] & USB4_MARGIN_CAP_0_VOLTAGE_INDP_MASK) >> + return (margining->caps[0] & USB4_MARGIN_CAP_0_VOLTAGE_INDP_MASK) >> USB4_MARGIN_CAP_0_VOLTAGE_INDP_SHIFT; } -static bool supports_time(const struct usb4_port *usb4) +static bool supports_time(const struct tb_margining *margining) { - return usb4->margining->caps[0] & USB4_MARGIN_CAP_0_TIME; + return margining->caps[0] & USB4_MARGIN_CAP_0_TIME; } /* Only applicable if supports_time() returns true */ -static unsigned int independent_time_margins(const struct usb4_port *usb4) +static unsigned int +independent_time_margins(const struct tb_margining *margining) { - return (usb4->margining->caps[1] & USB4_MARGIN_CAP_1_TIME_INDP_MASK) >> + return (margining->caps[1] & USB4_MARGIN_CAP_1_TIME_INDP_MASK) >> USB4_MARGIN_CAP_1_TIME_INDP_SHIFT; } @@ -263,9 +267,8 @@ margining_ber_level_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct seq_file *s = file->private_data; - struct tb_port *port = s->private; - struct usb4_port *usb4 = port->usb4; - struct tb *tb = port->sw->tb; + struct tb_margining *margining = s->private; + struct tb *tb = margining->port->sw->tb; unsigned int val; int ret = 0; char *buf; @@ -273,7 +276,7 @@ margining_ber_level_write(struct file *file, const char __user *user_buf, if (mutex_lock_interruptible(&tb->lock)) return -ERESTARTSYS; - if (usb4->margining->software) { + if (margining->software) { ret = -EINVAL; goto out_unlock; } @@ -290,13 +293,13 @@ margining_ber_level_write(struct file *file, const char __user *user_buf, if (ret) goto out_free; - if (val < 
usb4->margining->min_ber_level || - val > usb4->margining->max_ber_level) { + if (val < margining->min_ber_level || + val > margining->max_ber_level) { ret = -EINVAL; goto out_free; } - usb4->margining->ber_level = val; + margining->ber_level = val; out_free: free_page((unsigned long)buf); @@ -316,52 +319,50 @@ static void ber_level_show(struct seq_file *s, unsigned int val) static int margining_ber_level_show(struct seq_file *s, void *not_used) { - struct tb_port *port = s->private; - struct usb4_port *usb4 = port->usb4; + const struct tb_margining *margining = s->private; - if (usb4->margining->software) + if (margining->software) return -EINVAL; - ber_level_show(s, usb4->margining->ber_level); + ber_level_show(s, margining->ber_level); return 0; } DEBUGFS_ATTR_RW(margining_ber_level); static int margining_caps_show(struct seq_file *s, void *not_used) { - struct tb_port *port = s->private; - struct usb4_port *usb4 = port->usb4; - struct tb *tb = port->sw->tb; + struct tb_margining *margining = s->private; + struct tb *tb = margining->port->sw->tb; u32 cap0, cap1; if (mutex_lock_interruptible(&tb->lock)) return -ERESTARTSYS; /* Dump the raw caps first */ - cap0 = usb4->margining->caps[0]; + cap0 = margining->caps[0]; seq_printf(s, "0x%08x\n", cap0); - cap1 = usb4->margining->caps[1]; + cap1 = margining->caps[1]; seq_printf(s, "0x%08x\n", cap1); seq_printf(s, "# software margining: %s\n", - supports_software(usb4) ? "yes" : "no"); - if (supports_hardware(usb4)) { + supports_software(margining) ? "yes" : "no"); + if (supports_hardware(margining)) { seq_puts(s, "# hardware margining: yes\n"); seq_puts(s, "# minimum BER level contour: "); - ber_level_show(s, usb4->margining->min_ber_level); + ber_level_show(s, margining->min_ber_level); seq_puts(s, "# maximum BER level contour: "); - ber_level_show(s, usb4->margining->max_ber_level); + ber_level_show(s, margining->max_ber_level); } else { seq_puts(s, "# hardware margining: no\n"); } seq_printf(s, "# both lanes simultaneously: %s\n", - both_lanes(usb4) ? "yes" : "no"); + both_lanes(margining) ? "yes" : "no"); seq_printf(s, "# voltage margin steps: %u\n", - usb4->margining->voltage_steps); + margining->voltage_steps); seq_printf(s, "# maximum voltage offset: %u mV\n", - usb4->margining->max_voltage_offset); + margining->max_voltage_offset); - switch (independent_voltage_margins(usb4)) { + switch (independent_voltage_margins(margining)) { case USB4_MARGIN_CAP_0_VOLTAGE_MIN: seq_puts(s, "# returns minimum between high and low voltage margins\n"); break; @@ -373,12 +374,12 @@ static int margining_caps_show(struct seq_file *s, void *not_used) break; } - if (supports_time(usb4)) { + if (supports_time(margining)) { seq_puts(s, "# time margining: yes\n"); seq_printf(s, "# time margining is destructive: %s\n", cap1 & USB4_MARGIN_CAP_1_TIME_DESTR ? 
"yes" : "no"); - switch (independent_time_margins(usb4)) { + switch (independent_time_margins(margining)) { case USB4_MARGIN_CAP_1_TIME_MIN: seq_puts(s, "# returns minimum between left and right time margins\n"); break; @@ -391,9 +392,9 @@ static int margining_caps_show(struct seq_file *s, void *not_used) } seq_printf(s, "# time margin steps: %u\n", - usb4->margining->time_steps); + margining->time_steps); seq_printf(s, "# maximum time offset: %u mUI\n", - usb4->margining->max_time_offset); + margining->max_time_offset); } else { seq_puts(s, "# time margining: no\n"); } @@ -408,9 +409,8 @@ margining_lanes_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct seq_file *s = file->private_data; - struct tb_port *port = s->private; - struct usb4_port *usb4 = port->usb4; - struct tb *tb = port->sw->tb; + struct tb_margining *margining = s->private; + struct tb *tb = margining->port->sw->tb; int ret = 0; char *buf; @@ -426,13 +426,13 @@ margining_lanes_write(struct file *file, const char __user *user_buf, } if (!strcmp(buf, "0")) { - usb4->margining->lanes = 0; + margining->lanes = 0; } else if (!strcmp(buf, "1")) { - usb4->margining->lanes = 1; + margining->lanes = 1; } else if (!strcmp(buf, "all")) { /* Needs to be supported */ - if (both_lanes(usb4)) - usb4->margining->lanes = 7; + if (both_lanes(margining)) + margining->lanes = 7; else ret = -EINVAL; } else { @@ -448,16 +448,15 @@ margining_lanes_write(struct file *file, const char __user *user_buf, static int margining_lanes_show(struct seq_file *s, void *not_used) { - struct tb_port *port = s->private; - struct usb4_port *usb4 = port->usb4; - struct tb *tb = port->sw->tb; + struct tb_margining *margining = s->private; + struct tb *tb = margining->port->sw->tb; unsigned int lanes; if (mutex_lock_interruptible(&tb->lock)) return -ERESTARTSYS; - lanes = usb4->margining->lanes; - if (both_lanes(usb4)) { + lanes = margining->lanes; + if (both_lanes(margining)) { if (!lanes) seq_puts(s, "[0] 1 all\n"); else if (lanes == 1) @@ -481,9 +480,8 @@ static ssize_t margining_mode_write(struct file *file, size_t count, loff_t *ppos) { struct seq_file *s = file->private_data; - struct tb_port *port = s->private; - struct usb4_port *usb4 = port->usb4; - struct tb *tb = port->sw->tb; + struct tb_margining *margining = s->private; + struct tb *tb = margining->port->sw->tb; int ret = 0; char *buf; @@ -499,13 +497,13 @@ static ssize_t margining_mode_write(struct file *file, } if (!strcmp(buf, "software")) { - if (supports_software(usb4)) - usb4->margining->software = true; + if (supports_software(margining)) + margining->software = true; else ret = -EINVAL; } else if (!strcmp(buf, "hardware")) { - if (supports_hardware(usb4)) - usb4->margining->software = false; + if (supports_hardware(margining)) + margining->software = false; else ret = -EINVAL; } else { @@ -521,23 +519,22 @@ static ssize_t margining_mode_write(struct file *file, static int margining_mode_show(struct seq_file *s, void *not_used) { - const struct tb_port *port = s->private; - const struct usb4_port *usb4 = port->usb4; - struct tb *tb = port->sw->tb; + struct tb_margining *margining = s->private; + struct tb *tb = margining->port->sw->tb; const char *space = ""; if (mutex_lock_interruptible(&tb->lock)) return -ERESTARTSYS; - if (supports_software(usb4)) { - if (usb4->margining->software) + if (supports_software(margining)) { + if (margining->software) seq_puts(s, "[software]"); else seq_puts(s, "software"); space = " "; } - if (supports_hardware(usb4)) { - 
if (usb4->margining->software) + if (supports_hardware(margining)) { + if (margining->software) seq_printf(s, "%shardware", space); else seq_printf(s, "%s[hardware]", space); @@ -552,10 +549,9 @@ DEBUGFS_ATTR_RW(margining_mode); static int margining_run_write(void *data, u64 val) { - struct tb_port *port = data; - struct usb4_port *usb4 = port->usb4; + struct tb_margining *margining = data; + struct tb_port *port = margining->port; struct tb_switch *sw = port->sw; - struct tb_margining *margining; struct tb_switch *down_sw; struct tb *tb = sw->tb; int ret, clx; @@ -590,8 +586,6 @@ static int margining_run_write(void *data, u64 val) clx = ret; } - margining = usb4->margining; - if (margining->software) { tb_port_dbg(port, "running software %s lane margining for lanes %u\n", margining->time ? "time" : "voltage", margining->lanes); @@ -632,16 +626,15 @@ static ssize_t margining_results_write(struct file *file, size_t count, loff_t *ppos) { struct seq_file *s = file->private_data; - struct tb_port *port = s->private; - struct usb4_port *usb4 = port->usb4; - struct tb *tb = port->sw->tb; + struct tb_margining *margining = s->private; + struct tb *tb = margining->port->sw->tb; if (mutex_lock_interruptible(&tb->lock)) return -ERESTARTSYS; /* Just clear the results */ - usb4->margining->results[0] = 0; - usb4->margining->results[1] = 0; + margining->results[0] = 0; + margining->results[1] = 0; mutex_unlock(&tb->lock); return count; @@ -675,15 +668,12 @@ static void time_margin_show(struct seq_file *s, static int margining_results_show(struct seq_file *s, void *not_used) { - struct tb_port *port = s->private; - struct usb4_port *usb4 = port->usb4; - struct tb_margining *margining; - struct tb *tb = port->sw->tb; + struct tb_margining *margining = s->private; + struct tb *tb = margining->port->sw->tb; if (mutex_lock_interruptible(&tb->lock)) return -ERESTARTSYS; - margining = usb4->margining; /* Dump the raw results first */ seq_printf(s, "0x%08x\n", margining->results[0]); /* Only the hardware margining has two result dwords */ @@ -745,9 +735,8 @@ static ssize_t margining_test_write(struct file *file, size_t count, loff_t *ppos) { struct seq_file *s = file->private_data; - struct tb_port *port = s->private; - struct usb4_port *usb4 = port->usb4; - struct tb *tb = port->sw->tb; + struct tb_margining *margining = s->private; + struct tb *tb = margining->port->sw->tb; int ret = 0; char *buf; @@ -762,10 +751,10 @@ static ssize_t margining_test_write(struct file *file, goto out_free; } - if (!strcmp(buf, "time") && supports_time(usb4)) - usb4->margining->time = true; + if (!strcmp(buf, "time") && supports_time(margining)) + margining->time = true; else if (!strcmp(buf, "voltage")) - usb4->margining->time = false; + margining->time = false; else ret = -EINVAL; @@ -778,15 +767,14 @@ static ssize_t margining_test_write(struct file *file, static int margining_test_show(struct seq_file *s, void *not_used) { - struct tb_port *port = s->private; - struct usb4_port *usb4 = port->usb4; - struct tb *tb = port->sw->tb; + struct tb_margining *margining = s->private; + struct tb *tb = margining->port->sw->tb; if (mutex_lock_interruptible(&tb->lock)) return -ERESTARTSYS; - if (supports_time(usb4)) { - if (usb4->margining->time) + if (supports_time(margining)) { + if (margining->time) seq_puts(s, "voltage [time]\n"); else seq_puts(s, "[voltage] time\n"); @@ -804,9 +792,8 @@ static ssize_t margining_margin_write(struct file *file, size_t count, loff_t *ppos) { struct seq_file *s = file->private_data; - struct tb_port 
*port = s->private; - struct usb4_port *usb4 = port->usb4; - struct tb *tb = port->sw->tb; + struct tb_margining *margining = s->private; + struct tb *tb = margining->port->sw->tb; int ret = 0; char *buf; @@ -821,18 +808,18 @@ static ssize_t margining_margin_write(struct file *file, goto out_free; } - if (usb4->margining->time) { + if (margining->time) { if (!strcmp(buf, "left")) - usb4->margining->right_high = false; + margining->right_high = false; else if (!strcmp(buf, "right")) - usb4->margining->right_high = true; + margining->right_high = true; else ret = -EINVAL; } else { if (!strcmp(buf, "low")) - usb4->margining->right_high = false; + margining->right_high = false; else if (!strcmp(buf, "high")) - usb4->margining->right_high = true; + margining->right_high = true; else ret = -EINVAL; } @@ -846,20 +833,19 @@ static ssize_t margining_margin_write(struct file *file, static int margining_margin_show(struct seq_file *s, void *not_used) { - struct tb_port *port = s->private; - struct usb4_port *usb4 = port->usb4; - struct tb *tb = port->sw->tb; + struct tb_margining *margining = s->private; + struct tb *tb = margining->port->sw->tb; if (mutex_lock_interruptible(&tb->lock)) return -ERESTARTSYS; - if (usb4->margining->time) { - if (usb4->margining->right_high) + if (margining->time) { + if (margining->right_high) seq_puts(s, "left [right]\n"); else seq_puts(s, "[left] right\n"); } else { - if (usb4->margining->right_high) + if (margining->right_high) seq_puts(s, "low [high]\n"); else seq_puts(s, "[low] high\n"); @@ -890,16 +876,16 @@ static void margining_port_init(struct tb_port *port) if (!margining) return; + margining->port = port; + ret = usb4_port_margining_caps(port, margining->caps); if (ret) { kfree(margining); return; } - usb4->margining = margining; - /* Set the initial mode */ - if (supports_software(usb4)) + if (supports_software(margining)) margining->software = true; val = (margining->caps[0] & USB4_MARGIN_CAP_0_VOLTAGE_STEPS_MASK) >> @@ -909,7 +895,7 @@ static void margining_port_init(struct tb_port *port) USB4_MARGIN_CAP_0_MAX_VOLTAGE_OFFSET_SHIFT; margining->max_voltage_offset = 74 + val * 2; - if (supports_time(usb4)) { + if (supports_time(margining)) { val = (margining->caps[1] & USB4_MARGIN_CAP_1_TIME_STEPS_MASK) >> USB4_MARGIN_CAP_1_TIME_STEPS_SHIFT; margining->time_steps = val; @@ -923,7 +909,7 @@ static void margining_port_init(struct tb_port *port) } dir = debugfs_create_dir("margining", parent); - if (supports_hardware(usb4)) { + if (supports_hardware(margining)) { val = (margining->caps[1] & USB4_MARGIN_CAP_1_MIN_BER_MASK) >> USB4_MARGIN_CAP_1_MIN_BER_SHIFT; margining->min_ber_level = val; @@ -934,18 +920,22 @@ static void margining_port_init(struct tb_port *port) /* Set the default to minimum */ margining->ber_level = margining->min_ber_level; - debugfs_create_file("ber_level_contour", 0400, dir, port, + debugfs_create_file("ber_level_contour", 0400, dir, margining, &margining_ber_level_fops); } - debugfs_create_file("caps", 0400, dir, port, &margining_caps_fops); - debugfs_create_file("lanes", 0600, dir, port, &margining_lanes_fops); - debugfs_create_file("mode", 0600, dir, port, &margining_mode_fops); - debugfs_create_file("run", 0600, dir, port, &margining_run_fops); - debugfs_create_file("results", 0600, dir, port, &margining_results_fops); - debugfs_create_file("test", 0600, dir, port, &margining_test_fops); - if (independent_voltage_margins(usb4) || - (supports_time(usb4) && independent_time_margins(usb4))) - debugfs_create_file("margin", 0600, dir, port, 
&margining_margin_fops); + debugfs_create_file("caps", 0400, dir, margining, &margining_caps_fops); + debugfs_create_file("lanes", 0600, dir, margining, &margining_lanes_fops); + debugfs_create_file("mode", 0600, dir, margining, &margining_mode_fops); + debugfs_create_file("run", 0600, dir, margining, &margining_run_fops); + debugfs_create_file("results", 0600, dir, margining, + &margining_results_fops); + debugfs_create_file("test", 0600, dir, margining, &margining_test_fops); + if (independent_voltage_margins(margining) || + (supports_time(margining) && independent_time_margins(margining))) + debugfs_create_file("margin", 0600, dir, margining, + &margining_margin_fops); + + usb4->margining = margining; } static void margining_port_remove(struct tb_port *port) From patchwork Tue Jan 23 13:26:29 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: [20/44] thunderbolt: Add sideband register access to debugfs From: Imre Deak X-Patchwork-Id: 575608 Message-Id: <20240123132653.413364-21-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:29 +0200 From: Mika Westerberg This makes it possible to read and write USB4 port and retimer sideband registers through debugfs which is useful for debugging and manufacturing purposes. We add "sb_regs" debugfs attribute under each USB4 port and retimer that is used to access the sideband. Signed-off-by: Mika Westerberg --- drivers/thunderbolt/debugfs.c | 268 ++++++++++++++++++++++++++++++++++ drivers/thunderbolt/retimer.c | 10 +- drivers/thunderbolt/sb_regs.h | 3 + drivers/thunderbolt/tb.h | 28 +++- drivers/thunderbolt/usb4.c | 112 ++++++-------- 5 files changed, 346 insertions(+), 75 deletions(-) diff --git a/drivers/thunderbolt/debugfs.c b/drivers/thunderbolt/debugfs.c index 4f50afd2516f7..8a032be229035 100644 --- a/drivers/thunderbolt/debugfs.c +++ b/drivers/thunderbolt/debugfs.c @@ -33,6 +33,23 @@ #define COUNTER_SET_LEN 3 +struct sb_reg { + unsigned int reg; + unsigned int size; +}; + +/* Sideband registers and their sizes as defined in the USB4 spec */ +static const struct sb_reg sb_regs[] = { + { USB4_SB_VENDOR_ID, 4 }, + { USB4_SB_PRODUCT_ID, 4 }, + { USB4_SB_OPCODE, 4 }, + { USB4_SB_METADATA, 4 }, + { USB4_SB_LINK_CONF, 3 }, + { USB4_SB_TXFFE, 4 }, + { USB4_SB_VERSION, 4 }, + { USB4_SB_DATA, 64 }, +}; + #define DEBUGFS_ATTR(__space, __write) \ static int __space ## _open(struct inode *inode, struct file *file) \ { \ @@ -184,10 +201,156 @@ static ssize_t switch_regs_write(struct file *file, const char __user *user_buf, return regs_write(sw, NULL, user_buf, count, ppos); } + +static bool parse_sb_line(char **line, u32 *reg, u32 *offs, u32 *val, + unsigned int *size) +{ + char *token; + int i, ret; + u32 v[3]; + + token = strsep(line, "\n"); + if (!token) + return false; + + /* + * Sideband register write we expect either + * # register value + * v[0] v[1]\n + * + * or + * + * # register offset value + * v[0] v[1] v[2]\n + * + * Here offset is double word index. 
+ */ + ret = sscanf(token, "%i %i %i", &v[0], &v[1], &v[2]); + if (ret == 3) { + *offs = v[1]; + *val = v[2]; + } else if (ret == 2) { + *offs = 0; + *val = v[1]; + } else { + return false; + } + + *reg = v[0]; + + for (i = 0; i < ARRAY_SIZE(sb_regs); i++) { + if (*reg == sb_regs[i].reg) { + if (*offs >= DIV_ROUND_UP(sb_regs[i].size, 4)) + return false; + *size = sb_regs[i].size; + return true; + } + } + + return false; +} + +static ssize_t sb_regs_write(struct tb_port *port, enum usb4_sb_target target, + u8 index, char *buf, size_t count, loff_t *ppos) +{ + u32 reg, val, offset, size; + char *line = buf; + + /* User did hardware changes behind the driver's back */ + add_taint(TAINT_USER, LOCKDEP_STILL_OK); + + while (parse_sb_line(&line, ®, &offset, &val, &size)) { + u32 data[16]; + int ret; + + memset(data, 0, sizeof(data)); + + /* Read the whole register if larger than one double word */ + if (size > 1) { + ret = usb4_port_sb_read(port, target, index, reg, data, + size); + if (ret) + return ret; + } + + data[offset] = val; + + ret = usb4_port_sb_write(port, target, index, reg, data, size); + if (ret) + return ret; + } + + return 0; +} + +static ssize_t port_sb_regs_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct seq_file *s = file->private_data; + struct tb_port *port = s->private; + struct tb_switch *sw = port->sw; + struct tb *tb = sw->tb; + char *buf; + int ret; + + buf = validate_and_copy_from_user(user_buf, &count); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + pm_runtime_get_sync(&sw->dev); + + if (mutex_lock_interruptible(&tb->lock)) { + ret = -ERESTARTSYS; + goto out_rpm_put; + } + + ret = sb_regs_write(port, USB4_SB_TARGET_ROUTER, 0, buf, count, ppos); + + mutex_unlock(&tb->lock); +out_rpm_put: + pm_runtime_mark_last_busy(&sw->dev); + pm_runtime_put_autosuspend(&sw->dev); + + return ret < 0 ? ret : count; +} + +static ssize_t retimer_sb_regs_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct seq_file *s = file->private_data; + struct tb_retimer *rt = s->private; + struct tb *tb = rt->tb; + char *buf; + int ret; + + buf = validate_and_copy_from_user(user_buf, &count); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + pm_runtime_get_sync(&rt->dev); + + if (mutex_lock_interruptible(&tb->lock)) { + ret = -ERESTARTSYS; + goto out_rpm_put; + } + + ret = sb_regs_write(rt->port, USB4_SB_TARGET_RETIMER, rt->index, buf, + count, ppos); + + mutex_unlock(&tb->lock); +out_rpm_put: + pm_runtime_mark_last_busy(&rt->dev); + pm_runtime_put_autosuspend(&rt->dev); + + return ret < 0 ? 
ret : count; +} #define DEBUGFS_MODE 0600 #else #define port_regs_write NULL #define switch_regs_write NULL +#define port_sb_regs_write NULL +#define retimer_sb_regs_write NULL #define DEBUGFS_MODE 0400 #endif @@ -1494,6 +1657,58 @@ static int counters_show(struct seq_file *s, void *not_used) } DEBUGFS_ATTR_RW(counters); +static int sb_regs_show(struct tb_port *port, enum usb4_sb_target target, + u8 index, struct seq_file *s) +{ + int ret, i; + + seq_puts(s, "# register value\n"); + + for (i = 0; i < ARRAY_SIZE(sb_regs); i++) { + const struct sb_reg *regs = &sb_regs[i]; + u32 data[16]; + int j; + + memset(data, 0, sizeof(data)); + ret = usb4_port_sb_read(port, target, index, regs->reg, data, + regs->size); + if (ret) + return ret; + + seq_printf(s, "0x%04x", regs->reg); + for (j = 0; j < DIV_ROUND_UP(regs->size, 4); j++) + seq_printf(s, " 0x%08x", data[j]); + seq_puts(s, "\n"); + } + + return 0; +} + +static int port_sb_regs_show(struct seq_file *s, void *not_used) +{ + struct tb_port *port = s->private; + struct tb_switch *sw = port->sw; + struct tb *tb = sw->tb; + int ret; + + pm_runtime_get_sync(&sw->dev); + + if (mutex_lock_interruptible(&tb->lock)) { + ret = -ERESTARTSYS; + goto out_rpm_put; + } + + ret = sb_regs_show(port, USB4_SB_TARGET_ROUTER, 0, s); + + mutex_unlock(&tb->lock); +out_rpm_put: + pm_runtime_mark_last_busy(&sw->dev); + pm_runtime_put_autosuspend(&sw->dev); + + return ret; +} +DEBUGFS_ATTR_RW(port_sb_regs); + /** * tb_switch_debugfs_init() - Add debugfs entries for router * @sw: Pointer to the router @@ -1528,6 +1743,9 @@ void tb_switch_debugfs_init(struct tb_switch *sw) if (port->config.counters_support) debugfs_create_file("counters", 0600, debugfs_dir, port, &counters_fops); + if (port->usb4) + debugfs_create_file("sb_regs", DEBUGFS_MODE, debugfs_dir, + port, &port_sb_regs_fops); } margining_switch_init(sw); @@ -1579,6 +1797,56 @@ void tb_service_debugfs_remove(struct tb_service *svc) svc->debugfs_dir = NULL; } +static int retimer_sb_regs_show(struct seq_file *s, void *not_used) +{ + struct tb_retimer *rt = s->private; + struct tb *tb = rt->tb; + int ret; + + pm_runtime_get_sync(&rt->dev); + + if (mutex_lock_interruptible(&tb->lock)) { + ret = -ERESTARTSYS; + goto out_rpm_put; + } + + ret = sb_regs_show(rt->port, USB4_SB_TARGET_RETIMER, rt->index, s); + + mutex_unlock(&tb->lock); +out_rpm_put: + pm_runtime_mark_last_busy(&rt->dev); + pm_runtime_put_autosuspend(&rt->dev); + + return ret; +} +DEBUGFS_ATTR_RW(retimer_sb_regs); + +/** + * tb_retimer_debugfs_init() - Add debugfs directory for retimer + * @rt: Pointer to retimer structure + * + * Adds and populates retimer debugfs directory. + */ +void tb_retimer_debugfs_init(struct tb_retimer *rt) +{ + rt->debugfs_dir = debugfs_create_dir(dev_name(&rt->dev), + tb_debugfs_root); + debugfs_create_file("sb_regs", DEBUGFS_MODE, rt->debugfs_dir, rt, + &retimer_sb_regs_fops); +} + +/** + * tb_retimer_debugfs_remove() - Remove retimer debugfs directory + * @rt: Pointer to retimer structure + * + * Removes the retimer debugfs directory along with its contents. 
+ */ +void tb_retimer_debugfs_remove(struct tb_retimer *rt) +{ + debugfs_remove_recursive(rt->debugfs_dir); + rt->debugfs_dir = NULL; +} + void tb_debugfs_init(void) { tb_debugfs_root = debugfs_create_dir("thunderbolt", NULL); diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c index d49d6628dbf29..f2c315c168dd3 100644 --- a/drivers/thunderbolt/retimer.c +++ b/drivers/thunderbolt/retimer.c @@ -368,16 +368,16 @@ static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status) u32 vendor, device; int ret; - ret = usb4_port_retimer_read(port, index, USB4_SB_VENDOR_ID, &vendor, - sizeof(vendor)); + ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index, + USB4_SB_VENDOR_ID, &vendor, sizeof(vendor)); if (ret) { if (ret != -ENODEV) tb_port_warn(port, "failed read retimer VendorId: %d\n", ret); return ret; } - ret = usb4_port_retimer_read(port, index, USB4_SB_PRODUCT_ID, &device, - sizeof(device)); + ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index, + USB4_SB_PRODUCT_ID, &device, sizeof(device)); if (ret) { if (ret != -ENODEV) tb_port_warn(port, "failed read retimer ProductId: %d\n", ret); @@ -433,12 +433,14 @@ static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status) pm_runtime_mark_last_busy(&rt->dev); pm_runtime_use_autosuspend(&rt->dev); + tb_retimer_debugfs_init(rt); return 0; } static void tb_retimer_remove(struct tb_retimer *rt) { dev_info(&rt->dev, "retimer disconnected\n"); + tb_retimer_debugfs_remove(rt); tb_nvm_free(rt->nvm); device_unregister(&rt->dev); } diff --git a/drivers/thunderbolt/sb_regs.h b/drivers/thunderbolt/sb_regs.h index f37a4320f10a5..d2fbb1cdcf3e8 100644 --- a/drivers/thunderbolt/sb_regs.h +++ b/drivers/thunderbolt/sb_regs.h @@ -35,6 +35,9 @@ enum usb4_sb_opcode { #define USB4_SB_METADATA 0x09 #define USB4_SB_METADATA_NVM_AUTH_WRITE_MASK GENMASK(5, 0) +#define USB4_SB_LINK_CONF 0x0c +#define USB4_SB_TXFFE 0x0d +#define USB4_SB_VERSION 0x0e #define USB4_SB_DATA 0x12 /* USB4_SB_OPCODE_READ_LANE_MARGINING_CAP */ diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h index 63d401c2ae60c..983f7c7b9bc1e 100644 --- a/drivers/thunderbolt/tb.h +++ b/drivers/thunderbolt/tb.h @@ -325,6 +325,7 @@ struct usb4_port { * @nvm: Pointer to the NVM if the retimer has one (%NULL otherwise) * @no_nvm_upgrade: Prevent NVM upgrade of this retimer * @auth_status: Status of last NVM authentication + * @debugfs_dir: Pointer to the debugfs structure */ struct tb_retimer { struct device dev; @@ -336,6 +337,7 @@ struct tb_retimer { struct tb_nvm *nvm; bool no_nvm_upgrade; u32 auth_status; + struct dentry *debugfs_dir; }; /** @@ -1319,6 +1321,24 @@ int usb4_port_router_offline(struct tb_port *port); int usb4_port_router_online(struct tb_port *port); int usb4_port_enumerate_retimers(struct tb_port *port); bool usb4_port_clx_supported(struct tb_port *port); + +/** + * enum tb_sb_target - Sideband transaction target + * @USB4_SB_TARGET_ROUTER: Target is the router itself + * @USB4_SB_TARGET_PARTNER: Target is partner + * @USB4_SB_TARGET_RETIMER: Target is retimer + */ +enum usb4_sb_target { + USB4_SB_TARGET_ROUTER, + USB4_SB_TARGET_PARTNER, + USB4_SB_TARGET_RETIMER, +}; + +int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target, u8 index, + u8 reg, void *buf, u8 size); +int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target, + u8 index, u8 reg, const void *buf, u8 size); + int usb4_port_margining_caps(struct tb_port *port, u32 *caps); bool usb4_port_asym_supported(struct tb_port *port); @@ 
-1334,10 +1354,6 @@ int usb4_port_sw_margin_errors(struct tb_port *port, u32 *errors); int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index); int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index); -int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf, - u8 size); -int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg, - const void *buf, u8 size); int usb4_port_retimer_is_last(struct tb_port *port, u8 index); int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index); int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index, @@ -1437,6 +1453,8 @@ void tb_xdomain_debugfs_init(struct tb_xdomain *xd); void tb_xdomain_debugfs_remove(struct tb_xdomain *xd); void tb_service_debugfs_init(struct tb_service *svc); void tb_service_debugfs_remove(struct tb_service *svc); +void tb_retimer_debugfs_init(struct tb_retimer *rt); +void tb_retimer_debugfs_remove(struct tb_retimer *rt); #else static inline void tb_debugfs_init(void) { } static inline void tb_debugfs_exit(void) { } @@ -1446,6 +1464,8 @@ static inline void tb_xdomain_debugfs_init(struct tb_xdomain *xd) { } static inline void tb_xdomain_debugfs_remove(struct tb_xdomain *xd) { } static inline void tb_service_debugfs_init(struct tb_service *svc) { } static inline void tb_service_debugfs_remove(struct tb_service *svc) { } +static inline void tb_retimer_debugfs_init(struct tb_retimer *rt) { } +static inline void tb_retimer_debugfs_remove(struct tb_retimer *rt) { } #endif #endif diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c index f4fba144105d0..ab97340247e5f 100644 --- a/drivers/thunderbolt/usb4.c +++ b/drivers/thunderbolt/usb4.c @@ -17,12 +17,6 @@ #define USB4_DATA_RETRIES 3 #define USB4_DATA_DWORDS 16 -enum usb4_sb_target { - USB4_SB_TARGET_ROUTER, - USB4_SB_TARGET_PARTNER, - USB4_SB_TARGET_RETIMER, -}; - #define USB4_NVM_READ_OFFSET_MASK GENMASK(23, 2) #define USB4_NVM_READ_OFFSET_SHIFT 2 #define USB4_NVM_READ_LENGTH_MASK GENMASK(27, 24) @@ -1284,8 +1278,20 @@ static int usb4_port_write_data(struct tb_port *port, const void *data, dwords); } -static int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target, - u8 index, u8 reg, void *buf, u8 size) +/** + * usb4_port_sb_read() - Read from sideband register + * @port: USB4 port to read + * @target: Sideband target + * @index: Retimer index if taget is %USB4_SB_TARGET_RETIMER + * @reg: Sideband register index + * @buf: Buffer where the sideband data is copied + * @size: Size of @buf + * + * Reads data from sideband register @reg and copies it into @buf. + * Returns %0 in case of success and negative errno in case of failure. + */ +int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target, u8 index, + u8 reg, void *buf, u8 size) { size_t dwords = DIV_ROUND_UP(size, 4); int ret; @@ -1324,8 +1330,20 @@ static int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target, return buf ? usb4_port_read_data(port, buf, dwords) : 0; } -static int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target, - u8 index, u8 reg, const void *buf, u8 size) +/** + * usb4_port_sb_write() - Write to sideband register + * @port: USB4 port to write + * @target: Sideband target + * @index: Retimer index if taget is %USB4_SB_TARGET_RETIMER + * @reg: Sideband register index + * @buf: Data to write + * @size: Size of @buf + * + * Writes @buf to sideband register @reg. Returns %0 in case of success + * and negative errno in case of failure. 
+ */ +int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target, + u8 index, u8 reg, const void *buf, u8 size) { size_t dwords = DIV_ROUND_UP(size, 4); int ret; @@ -1768,47 +1786,6 @@ int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index) USB4_SB_OPCODE_UNSET_INBOUND_SBTX, 500); } -/** - * usb4_port_retimer_read() - Read from retimer sideband registers - * @port: USB4 port - * @index: Retimer index - * @reg: Sideband register to read - * @buf: Data from @reg is stored here - * @size: Number of bytes to read - * - * Function reads retimer sideband registers starting from @reg. The - * retimer is connected to @port at @index. Returns %0 in case of - * success, and read data is copied to @buf. If there is no retimer - * present at given @index returns %-ENODEV. In any other failure - * returns negative errno. - */ -int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf, - u8 size) -{ - return usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index, reg, buf, - size); -} - -/** - * usb4_port_retimer_write() - Write to retimer sideband registers - * @port: USB4 port - * @index: Retimer index - * @reg: Sideband register to write - * @buf: Data that is written starting from @reg - * @size: Number of bytes to write - * - * Writes retimer sideband registers starting from @reg. The retimer is - * connected to @port at @index. Returns %0 in case of success. If there - * is no retimer present at given @index returns %-ENODEV. In any other - * failure returns negative errno. - */ -int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg, - const void *buf, u8 size) -{ - return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index, reg, buf, - size); -} - /** * usb4_port_retimer_is_last() - Is the retimer last on-board retimer * @port: USB4 port @@ -1829,8 +1806,8 @@ int usb4_port_retimer_is_last(struct tb_port *port, u8 index) if (ret) return ret; - ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata, - sizeof(metadata)); + ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index, + USB4_SB_METADATA, &metadata, sizeof(metadata)); return ret ? ret : metadata & 1; } @@ -1855,8 +1832,8 @@ int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index) if (ret) return ret; - ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata, - sizeof(metadata)); + ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index, + USB4_SB_METADATA, &metadata, sizeof(metadata)); return ret ? 
ret : metadata & USB4_NVM_SECTOR_SIZE_MASK; } @@ -1881,8 +1858,8 @@ int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index, metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) & USB4_NVM_SET_OFFSET_MASK; - ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata, - sizeof(metadata)); + ret = usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index, + USB4_SB_METADATA, &metadata, sizeof(metadata)); if (ret) return ret; @@ -1904,8 +1881,8 @@ static int usb4_port_retimer_nvm_write_next_block(void *data, u8 index = info->index; int ret; - ret = usb4_port_retimer_write(port, index, USB4_SB_DATA, - buf, dwords * 4); + ret = usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index, + USB4_SB_DATA, buf, dwords * 4); if (ret) return ret; @@ -1984,8 +1961,8 @@ int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index, u32 metadata, val; int ret; - ret = usb4_port_retimer_read(port, index, USB4_SB_OPCODE, &val, - sizeof(val)); + ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index, + USB4_SB_OPCODE, &val, sizeof(val)); if (ret) return ret; @@ -1996,8 +1973,9 @@ int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index, return 0; case -EAGAIN: - ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, - &metadata, sizeof(metadata)); + ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index, + USB4_SB_METADATA, &metadata, + sizeof(metadata)); if (ret) return ret; @@ -2022,8 +2000,8 @@ static int usb4_port_retimer_nvm_read_block(void *data, unsigned int dwaddress, if (dwords < USB4_DATA_DWORDS) metadata |= dwords << USB4_NVM_READ_LENGTH_SHIFT; - ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata, - sizeof(metadata)); + ret = usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index, + USB4_SB_METADATA, &metadata, sizeof(metadata)); if (ret) return ret; @@ -2031,8 +2009,8 @@ static int usb4_port_retimer_nvm_read_block(void *data, unsigned int dwaddress, if (ret) return ret; - return usb4_port_retimer_read(port, index, USB4_SB_DATA, buf, - dwords * 4); + return usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index, + USB4_SB_DATA, buf, dwords * 4); } /** From patchwork Tue Jan 23 13:26:30 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: [21/44] thunderbolt: Make margining functions accept target and retimer index From: Imre Deak X-Patchwork-Id: 575611 Message-Id: <20240123132653.413364-22-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:30 +0200 From: Mika Westerberg In order to add lane margining support for retimers make the margining functions take sideband target and retimer index as parameters. This makes it possible to access both router and retimer sideband using the same functions. Signed-off-by: Mika Westerberg --- drivers/thunderbolt/debugfs.c | 16 ++++++----- drivers/thunderbolt/tb.h | 24 +++++++++-------- drivers/thunderbolt/usb4.c | 51 +++++++++++++++++++++-------------- 3 files changed, 54 insertions(+), 37 deletions(-) diff --git a/drivers/thunderbolt/debugfs.c b/drivers/thunderbolt/debugfs.c index 8a032be229035..b0b3985f04717 100644 --- a/drivers/thunderbolt/debugfs.c +++ b/drivers/thunderbolt/debugfs.c @@ -752,22 +752,25 @@ static int margining_run_write(void *data, u64 val) if (margining->software) { tb_port_dbg(port, "running software %s lane margining for lanes %u\n", margining->time ? 
"time" : "voltage", margining->lanes); - ret = usb4_port_sw_margin(port, margining->lanes, margining->time, + ret = usb4_port_sw_margin(port, USB4_SB_TARGET_ROUTER, 0, + margining->lanes, margining->time, margining->right_high, USB4_MARGIN_SW_COUNTER_CLEAR); if (ret) goto out_clx; - ret = usb4_port_sw_margin_errors(port, &margining->results[0]); + ret = usb4_port_sw_margin_errors(port, USB4_SB_TARGET_ROUTER, 0, + &margining->results[0]); } else { tb_port_dbg(port, "running hardware %s lane margining for lanes %u\n", margining->time ? "time" : "voltage", margining->lanes); /* Clear the results */ margining->results[0] = 0; margining->results[1] = 0; - ret = usb4_port_hw_margin(port, margining->lanes, - margining->ber_level, margining->time, - margining->right_high, margining->results); + ret = usb4_port_hw_margin(port, USB4_SB_TARGET_ROUTER, 0, + margining->lanes, margining->ber_level, + margining->time, margining->right_high, + margining->results); } out_clx: @@ -1041,7 +1044,8 @@ static void margining_port_init(struct tb_port *port) margining->port = port; - ret = usb4_port_margining_caps(port, margining->caps); + ret = usb4_port_margining_caps(port, USB4_SB_TARGET_ROUTER, 0, + margining->caps); if (ret) { kfree(margining); return; diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h index 983f7c7b9bc1e..407c052bdd232 100644 --- a/drivers/thunderbolt/tb.h +++ b/drivers/thunderbolt/tb.h @@ -1322,6 +1322,10 @@ int usb4_port_router_online(struct tb_port *port); int usb4_port_enumerate_retimers(struct tb_port *port); bool usb4_port_clx_supported(struct tb_port *port); +bool usb4_port_asym_supported(struct tb_port *port); +int usb4_port_asym_set_link_width(struct tb_port *port, enum tb_link_width width); +int usb4_port_asym_start(struct tb_port *port); + /** * enum tb_sb_target - Sideband transaction target * @USB4_SB_TARGET_ROUTER: Target is the router itself @@ -1339,18 +1343,16 @@ int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target, u8 index int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target, u8 index, u8 reg, const void *buf, u8 size); -int usb4_port_margining_caps(struct tb_port *port, u32 *caps); - -bool usb4_port_asym_supported(struct tb_port *port); -int usb4_port_asym_set_link_width(struct tb_port *port, enum tb_link_width width); -int usb4_port_asym_start(struct tb_port *port); - -int usb4_port_hw_margin(struct tb_port *port, unsigned int lanes, - unsigned int ber_level, bool timing, bool right_high, - u32 *results); -int usb4_port_sw_margin(struct tb_port *port, unsigned int lanes, bool timing, +int usb4_port_margining_caps(struct tb_port *port, enum usb4_sb_target target, + u8 index, u32 *caps); +int usb4_port_hw_margin(struct tb_port *port, enum usb4_sb_target target, + u8 index, unsigned int lanes, unsigned int ber_level, + bool timing, bool right_high, u32 *results); +int usb4_port_sw_margin(struct tb_port *port, enum usb4_sb_target target, + u8 index, unsigned int lanes, bool timing, bool right_high, u32 counter); -int usb4_port_sw_margin_errors(struct tb_port *port, u32 *errors); +int usb4_port_sw_margin_errors(struct tb_port *port, enum usb4_sb_target target, + u8 index, u32 *errors); int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index); int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index); diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c index ab97340247e5f..125f12905bb04 100644 --- a/drivers/thunderbolt/usb4.c +++ b/drivers/thunderbolt/usb4.c @@ -1620,26 +1620,31 @@ 
int usb4_port_asym_start(struct tb_port *port) /** * usb4_port_margining_caps() - Read USB4 port margining capabilities * @port: USB4 port + * @target: Sideband target + * @index: Retimer index if target is %USB4_SB_TARGET_RETIMER + * @caps: Array with at least two elements to hold the results * * Reads the USB4 port lane margining capabilities into @caps. */ -int usb4_port_margining_caps(struct tb_port *port, u32 *caps) +int usb4_port_margining_caps(struct tb_port *port, enum usb4_sb_target target, + u8 index, u32 *caps) { int ret; - ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0, + ret = usb4_port_sb_op(port, target, index, USB4_SB_OPCODE_READ_LANE_MARGINING_CAP, 500); if (ret) return ret; - return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0, - USB4_SB_DATA, caps, sizeof(*caps) * 2); + return usb4_port_sb_read(port, target, index, USB4_SB_DATA, caps, + sizeof(*caps) * 2); } /** * usb4_port_hw_margin() - Run hardware lane margining on port * @port: USB4 port + * @target: Sideband target + * @index: Retimer index if target is %USB4_SB_TARGET_RETIMER + * @lanes: Which lanes to run (must match the port capabilities). Can be * %0, %1 or %7. * @ber_level: BER level contour value @@ -1650,9 +1655,9 @@ int usb4_port_margining_caps(struct tb_port *port, u32 *caps) * Runs hardware lane margining on USB4 port and returns the result in * @results. */ -int usb4_port_hw_margin(struct tb_port *port, unsigned int lanes, - unsigned int ber_level, bool timing, bool right_high, - u32 *results) +int usb4_port_hw_margin(struct tb_port *port, enum usb4_sb_target target, + u8 index, unsigned int lanes, unsigned int ber_level, + bool timing, bool right_high, u32 *results) { u32 val; int ret; @@ -1666,23 +1671,25 @@ int usb4_port_hw_margin(struct tb_port *port, unsigned int lanes, val |= (ber_level << USB4_MARGIN_HW_BER_SHIFT) & USB4_MARGIN_HW_BER_MASK; - ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0, - USB4_SB_METADATA, &val, sizeof(val)); + ret = usb4_port_sb_write(port, target, index, USB4_SB_METADATA, &val, + sizeof(val)); if (ret) return ret; - ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0, + ret = usb4_port_sb_op(port, target, index, USB4_SB_OPCODE_RUN_HW_LANE_MARGINING, 2500); if (ret) return ret; - return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0, - USB4_SB_DATA, results, sizeof(*results) * 2); + return usb4_port_sb_read(port, target, index, USB4_SB_DATA, results, + sizeof(*results) * 2); } /** * usb4_port_sw_margin() - Run software lane margining on port * @port: USB4 port + * @target: Sideband target + * @index: Retimer index if target is %USB4_SB_TARGET_RETIMER + * @lanes: Which lanes to run (must match the port capabilities). Can be * %0, %1 or %7. * @timing: Perform timing margining instead of voltage @@ -1693,7 +1700,8 @@ int usb4_port_hw_margin(struct tb_port *port, unsigned int lanes, * counters by calling usb4_port_sw_margin_errors(). Returns %0 on * success and negative errno otherwise.
*/ -int usb4_port_sw_margin(struct tb_port *port, unsigned int lanes, bool timing, +int usb4_port_sw_margin(struct tb_port *port, enum usb4_sb_target target, + u8 index, unsigned int lanes, bool timing, bool right_high, u32 counter) { u32 val; @@ -1707,34 +1715,37 @@ int usb4_port_sw_margin(struct tb_port *port, unsigned int lanes, bool timing, val |= (counter << USB4_MARGIN_SW_COUNTER_SHIFT) & USB4_MARGIN_SW_COUNTER_MASK; - ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0, - USB4_SB_METADATA, &val, sizeof(val)); + ret = usb4_port_sb_write(port, target, index, USB4_SB_METADATA, &val, + sizeof(val)); if (ret) return ret; - return usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0, + return usb4_port_sb_op(port, target, index, USB4_SB_OPCODE_RUN_SW_LANE_MARGINING, 2500); } /** * usb4_port_sw_margin_errors() - Read the software margining error counters * @port: USB4 port + * @target: Sideband target + * @index: Retimer index if target is %USB4_SB_TARGET_RETIMER + * @errors: Error metadata is copied here. * * This reads back the software margining error counters from the port. * Returns %0 on success and negative errno otherwise. */ -int usb4_port_sw_margin_errors(struct tb_port *port, u32 *errors) +int usb4_port_sw_margin_errors(struct tb_port *port, enum usb4_sb_target target, + u8 index, u32 *errors) { int ret; - ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0, + ret = usb4_port_sb_op(port, target, index, USB4_SB_OPCODE_READ_SW_MARGIN_ERR, 150); if (ret) return ret; - return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0, - USB4_SB_METADATA, errors, sizeof(*errors)); + return usb4_port_sb_read(port, target, index, USB4_SB_METADATA, errors, + sizeof(*errors)); } static inline int usb4_port_retimer_op(struct tb_port *port, u8 index, From patchwork Tue Jan 23 13:26:31 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: [22/44] thunderbolt: Add receiver lane margining support for retimers From: Imre Deak X-Patchwork-Id: 575609 Message-Id: <20240123132653.413364-23-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:31 +0200 From: Mika Westerberg Retimers support lane margining as well, so make this available through debugfs in the same way as we do for the USB4 ports. When this is enabled we also expose retimers on the other side of the cable, because margining is typically implemented only in the direction towards the cable. However, for the retimers on the other side of the cable we do not allow NVM upgrade, both to avoid confusing the existing userspace (the same retimer may now appear twice with a different name) and because it is probably not a good idea anyway. Signed-off-by: Mika Westerberg --- drivers/thunderbolt/Kconfig | 11 +++-- drivers/thunderbolt/debugfs.c | 92 +++++++++++++++++++++++++---------- drivers/thunderbolt/retimer.c | 39 +++++++++------ drivers/thunderbolt/sb_regs.h | 1 + drivers/thunderbolt/tb.h | 5 ++ drivers/thunderbolt/usb4.c | 24 +++++++++ 6 files changed, 125 insertions(+), 47 deletions(-) diff --git a/drivers/thunderbolt/Kconfig b/drivers/thunderbolt/Kconfig index 448fd2ec8f6e2..279de1ba44876 100644 --- a/drivers/thunderbolt/Kconfig +++ b/drivers/thunderbolt/Kconfig @@ -28,14 +28,15 @@ config USB4_DEBUGFS_WRITE this for production systems or distro kernels.
config USB4_DEBUGFS_MARGINING - bool "Expose receiver lane margining operations under USB4 ports (DANGEROUS)" + bool "Expose receiver lane margining operations under USB4 ports and retimers (DANGEROUS)" depends on DEBUG_FS depends on USB4_DEBUGFS_WRITE help - Enables hardware and software based receiver lane margining support - under each USB4 port. Used for electrical quality and robustness - validation during manufacturing. Should not be enabled by distro - kernels. + Enables hardware and software based receiver lane margining + support under each USB4 port and retimer, including retimers + on the other side of the cable. Used for electrical quality + and robustness validation during manufacturing. Should not be + enabled by distro kernels. config USB4_KUNIT_TEST bool "KUnit tests" if !KUNIT_ALL_TESTS diff --git a/drivers/thunderbolt/debugfs.c b/drivers/thunderbolt/debugfs.c index b0b3985f04717..ed2ff7b9a6d8d 100644 --- a/drivers/thunderbolt/debugfs.c +++ b/drivers/thunderbolt/debugfs.c @@ -358,6 +358,9 @@ static ssize_t retimer_sb_regs_write(struct file *file, /** * struct tb_margining - Lane margining support * @port: USB4 port through which the margining operations are run + * @target: Sideband target + * @index: Retimer index if taget is %USB4_SB_TARGET_RETIMER + * @dev: Pointer to the device that is the target (USB4 port or retimer) * @caps: Port lane margining capabilities * @results: Last lane margining results * @lanes: %0, %1 or %7 (all) @@ -375,6 +378,9 @@ static ssize_t retimer_sb_regs_write(struct file *file, */ struct tb_margining { struct tb_port *port; + enum usb4_sb_target target; + u8 index; + struct device *dev; u32 caps[2]; u32 results[2]; unsigned int lanes; @@ -714,6 +720,7 @@ static int margining_run_write(void *data, u64 val) { struct tb_margining *margining = data; struct tb_port *port = margining->port; + struct device *dev = margining->dev; struct tb_switch *sw = port->sw; struct tb_switch *down_sw; struct tb *tb = sw->tb; @@ -722,7 +729,7 @@ static int margining_run_write(void *data, u64 val) if (val != 1) return -EINVAL; - pm_runtime_get_sync(&sw->dev); + pm_runtime_get_sync(dev); if (mutex_lock_interruptible(&tb->lock)) { ret = -ERESTARTSYS; @@ -750,24 +757,29 @@ static int margining_run_write(void *data, u64 val) } if (margining->software) { - tb_port_dbg(port, "running software %s lane margining for lanes %u\n", - margining->time ? "time" : "voltage", margining->lanes); - ret = usb4_port_sw_margin(port, USB4_SB_TARGET_ROUTER, 0, + tb_port_dbg(port, + "running software %s lane margining for %s lanes %u\n", + margining->time ? "time" : "voltage", dev_name(dev), + margining->lanes); + ret = usb4_port_sw_margin(port, margining->target, margining->index, margining->lanes, margining->time, margining->right_high, USB4_MARGIN_SW_COUNTER_CLEAR); if (ret) goto out_clx; - ret = usb4_port_sw_margin_errors(port, USB4_SB_TARGET_ROUTER, 0, + ret = usb4_port_sw_margin_errors(port, margining->target, + margining->index, &margining->results[0]); } else { - tb_port_dbg(port, "running hardware %s lane margining for lanes %u\n", - margining->time ? "time" : "voltage", margining->lanes); + tb_port_dbg(port, + "running hardware %s lane margining for %s lanes %u\n", + margining->time ? 
"time" : "voltage", dev_name(dev), + margining->lanes); /* Clear the results */ margining->results[0] = 0; margining->results[1] = 0; - ret = usb4_port_hw_margin(port, USB4_SB_TARGET_ROUTER, 0, + ret = usb4_port_hw_margin(port, margining->target, margining->index, margining->lanes, margining->ber_level, margining->time, margining->right_high, margining->results); @@ -779,8 +791,8 @@ static int margining_run_write(void *data, u64 val) out_unlock: mutex_unlock(&tb->lock); out_rpm_put: - pm_runtime_mark_last_busy(&sw->dev); - pm_runtime_put_autosuspend(&sw->dev); + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); return ret; } @@ -1022,33 +1034,29 @@ static int margining_margin_show(struct seq_file *s, void *not_used) } DEBUGFS_ATTR_RW(margining_margin); -static void margining_port_init(struct tb_port *port) +static struct tb_margining *margining_alloc(struct tb_port *port, + struct device *dev, + enum usb4_sb_target target, + u8 index, struct dentry *parent) { struct tb_margining *margining; - struct dentry *dir, *parent; - struct usb4_port *usb4; - char dir_name[10]; + struct dentry *dir; unsigned int val; int ret; - usb4 = port->usb4; - if (!usb4) - return; - - snprintf(dir_name, sizeof(dir_name), "port%d", port->port); - parent = debugfs_lookup(dir_name, port->sw->debugfs_dir); - margining = kzalloc(sizeof(*margining), GFP_KERNEL); if (!margining) - return; + return NULL; margining->port = port; + margining->target = target; + margining->index = index; + margining->dev = dev; - ret = usb4_port_margining_caps(port, USB4_SB_TARGET_ROUTER, 0, - margining->caps); + ret = usb4_port_margining_caps(port, target, index, margining->caps); if (ret) { kfree(margining); - return; + return NULL; } /* Set the initial mode */ @@ -1101,8 +1109,22 @@ static void margining_port_init(struct tb_port *port) (supports_time(margining) && independent_time_margins(margining))) debugfs_create_file("margin", 0600, dir, margining, &margining_margin_fops); + return margining; +} - usb4->margining = margining; +static void margining_port_init(struct tb_port *port) +{ + struct dentry *parent; + char dir_name[10]; + + if (!port->usb4) + return; + + snprintf(dir_name, sizeof(dir_name), "port%d", port->port); + parent = debugfs_lookup(dir_name, port->sw->debugfs_dir); + port->usb4->margining = margining_alloc(port, &port->usb4->dev, + USB4_SB_TARGET_ROUTER, 0, + parent); } static void margining_port_remove(struct tb_port *port) @@ -1176,11 +1198,27 @@ static void margining_xdomain_remove(struct tb_xdomain *xd) downstream = tb_port_at(xd->route, parent_sw); margining_port_remove(downstream); } + +static void margining_retimer_init(struct tb_retimer *rt) +{ + rt->margining = margining_alloc(rt->port, &rt->dev, + USB4_SB_TARGET_RETIMER, rt->index, + rt->debugfs_dir); +} + +static void margining_retimer_remove(struct tb_retimer *rt) +{ + debugfs_remove_recursive(debugfs_lookup("margining", rt->debugfs_dir)); + kfree(rt->margining); + rt->margining = NULL; +} #else static inline void margining_switch_init(struct tb_switch *sw) { } static inline void margining_switch_remove(struct tb_switch *sw) { } static inline void margining_xdomain_init(struct tb_xdomain *xd) { } static inline void margining_xdomain_remove(struct tb_xdomain *xd) { } +static inline void margining_retimer_init(struct tb_retimer *rt) { } +static inline void margining_retimer_remove(struct tb_retimer *rt) { } #endif static int port_clear_all_counters(struct tb_port *port) @@ -1837,6 +1875,7 @@ void tb_retimer_debugfs_init(struct tb_retimer 
*rt) tb_debugfs_root); debugfs_create_file("sb_regs", DEBUGFS_MODE, rt->debugfs_dir, rt, &retimer_sb_regs_fops); + margining_retimer_init(rt); } /** @@ -1847,6 +1886,7 @@ void tb_retimer_debugfs_init(struct tb_retimer *rt) */ void tb_retimer_debugfs_remove(struct tb_retimer *rt) { + margining_retimer_remove(rt); debugfs_remove_recursive(rt->debugfs_dir); rt->debugfs_dir = NULL; } diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c index f2c315c168dd3..9665de41d5d9a 100644 --- a/drivers/thunderbolt/retimer.c +++ b/drivers/thunderbolt/retimer.c @@ -14,7 +14,11 @@ #include "sb_regs.h" #include "tb.h" +#if IS_ENABLED(CONFIG_USB4_DEBUGFS_MARGINING) #define TB_MAX_RETIMER_INDEX 6 +#else +#define TB_MAX_RETIMER_INDEX 2 +#endif /** * tb_retimer_nvm_read() - Read contents of retimer NVM @@ -315,6 +319,8 @@ static ssize_t nvm_version_show(struct device *dev, if (!rt->nvm) ret = -EAGAIN; + else if (rt->no_nvm_upgrade) + ret = -EOPNOTSUPP; else ret = sysfs_emit(buf, "%x.%x\n", rt->nvm->major, rt->nvm->minor); @@ -362,7 +368,8 @@ struct device_type tb_retimer_type = { .release = tb_retimer_release, }; -static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status) +static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status, + bool on_board) { struct tb_retimer *rt; u32 vendor, device; @@ -384,13 +391,6 @@ static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status) return ret; } - /* - * Check that it supports NVM operations. If not then don't add - * the device at all. - */ - ret = usb4_port_retimer_nvm_sector_size(port, index); - if (ret < 0) - return ret; rt = kzalloc(sizeof(*rt), GFP_KERNEL); if (!rt) @@ -403,6 +403,13 @@ static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status) rt->port = port; rt->tb = port->sw->tb; + /* + * Only support NVM upgrade for on-board retimers. Do not allow it for + * the retimers on the other side of the connection.
+ */ + if (!on_board || usb4_port_retimer_nvm_sector_size(port, index) <= 0) + rt->no_nvm_upgrade = true; + rt->dev.parent = &port->usb4->dev; rt->dev.bus = &tb_bus_type; rt->dev.type = &tb_retimer_type; @@ -518,26 +525,26 @@ int tb_retimer_scan(struct tb_port *port, bool add) break; } - tb_retimer_unset_inbound_sbtx(port); - - if (!last_idx) - return 0; - - /* Add on-board retimers if they do not exist already */ + /* Add retimers if they do not exist already */ ret = 0; - for (i = 1; i <= last_idx; i++) { + for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) { struct tb_retimer *rt; + /* Skip cable retimers */ + if (usb4_port_retimer_is_cable(port, i)) + continue; + rt = tb_port_find_retimer(port, i); if (rt) { put_device(&rt->dev); } else if (add) { - ret = tb_retimer_add(port, i, status[i]); + ret = tb_retimer_add(port, i, status[i], i <= last_idx); if (ret && ret != -EOPNOTSUPP) break; } } + tb_retimer_unset_inbound_sbtx(port); return ret; } diff --git a/drivers/thunderbolt/sb_regs.h b/drivers/thunderbolt/sb_regs.h index d2fbb1cdcf3e8..87a02aa60bcdd 100644 --- a/drivers/thunderbolt/sb_regs.h +++ b/drivers/thunderbolt/sb_regs.h @@ -22,6 +22,7 @@ enum usb4_sb_opcode { USB4_SB_OPCODE_SET_INBOUND_SBTX = 0x5055534c, /* "LSUP" */ USB4_SB_OPCODE_UNSET_INBOUND_SBTX = 0x50555355, /* "USUP" */ USB4_SB_OPCODE_QUERY_LAST_RETIMER = 0x5453414c, /* "LAST" */ + USB4_SB_OPCODE_QUERY_CABLE_RETIMER = 0x524c4243, /* "CBLR" */ USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE = 0x53534e47, /* "GNSS" */ USB4_SB_OPCODE_NVM_SET_OFFSET = 0x53504f42, /* "BOPS" */ USB4_SB_OPCODE_NVM_BLOCK_WRITE = 0x574b4c42, /* "BLKW" */ diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h index 407c052bdd232..5a0f6d5125c43 100644 --- a/drivers/thunderbolt/tb.h +++ b/drivers/thunderbolt/tb.h @@ -326,6 +326,7 @@ struct usb4_port { * @no_nvm_upgrade: Prevent NVM upgrade of this retimer * @auth_status: Status of last NVM authentication * @debugfs_dir: Pointer to the debugfs structure + * @margining: Pointer to margining structure if enabled */ struct tb_retimer { struct device dev; @@ -338,6 +339,9 @@ struct tb_retimer { bool no_nvm_upgrade; u32 auth_status; struct dentry *debugfs_dir; +#ifdef CONFIG_USB4_DEBUGFS_MARGINING + struct tb_margining *margining; +#endif }; /** @@ -1357,6 +1361,7 @@ int usb4_port_sw_margin_errors(struct tb_port *port, enum usb4_sb_target target, int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index); int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index); int usb4_port_retimer_is_last(struct tb_port *port, u8 index); +int usb4_port_retimer_is_cable(struct tb_port *port, u8 index); int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index); int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index, unsigned int address); diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c index 125f12905bb04..a64c464df9555 100644 --- a/drivers/thunderbolt/usb4.c +++ b/drivers/thunderbolt/usb4.c @@ -1822,6 +1822,30 @@ int usb4_port_retimer_is_last(struct tb_port *port, u8 index) return ret ? ret : metadata & 1; } +/** + * usb4_port_retimer_is_cable() - Is the retimer cable retimer + * @port: USB4 port + * @index: Retimer index + * + * If the retimer at @index is last cable retimer this function returns + * %1 and %0 if it is on-board retimer. In case a retimer is not present + * at @index returns %-ENODEV. Otherwise returns negative errno. 
+ */ +int usb4_port_retimer_is_cable(struct tb_port *port, u8 index) +{ + u32 metadata; + int ret; + + ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_QUERY_CABLE_RETIMER, + 500); + if (ret) + return ret; + + ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index, + USB4_SB_METADATA, &metadata, sizeof(metadata)); + return ret ? ret : metadata & 1; +} + /** * usb4_port_retimer_nvm_sector_size() - Read retimer NVM sector size * @port: USB4 port From patchwork Tue Jan 23 13:26:32 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: [23/44] thunderbolt: Add trace events support for the control channel From: Imre Deak X-Patchwork-Id: 575610 Message-Id: <20240123132653.413364-24-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:32 +0200 From: Gil Fine Sometimes it is useful to see the traffic happening inside the control channel, especially when debugging a possible problem. This adds tracepoints close to the hardware which can be enabled dynamically as needed using the standard Linux trace events facility. Signed-off-by: Mika Westerberg Signed-off-by: Gil Fine --- drivers/thunderbolt/Makefile | 1 + drivers/thunderbolt/ctl.c | 9 ++ drivers/thunderbolt/trace.h | 183 +++++++++++++++++++++++++++++++++++ 3 files changed, 193 insertions(+) create mode 100644 drivers/thunderbolt/trace.h diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile index c8b3d7b780982..b44b32dcb8322 100644 --- a/drivers/thunderbolt/Makefile +++ b/drivers/thunderbolt/Makefile @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only +ccflags-y := -I$(src) obj-${CONFIG_USB4} := thunderbolt.o thunderbolt-objs := nhi.o nhi_ops.o ctl.o tb.o switch.o cap.o path.o tunnel.o eeprom.o thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o tmu.o usb4.o diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c index d997a4c545f79..95f296ca44cba 100644 --- a/drivers/thunderbolt/ctl.c +++ b/drivers/thunderbolt/ctl.c @@ -15,6 +15,8 @@ #include "ctl.h" +#define CREATE_TRACE_POINTS +#include "trace.h" #define TB_CTL_RX_PKG_COUNT 10 #define TB_CTL_RETRIES 4 @@ -369,6 +371,9 @@ static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len, pkg->frame.size = len + 4; pkg->frame.sof = type; pkg->frame.eof = type; + + trace_tb_tx(type, data, len); + cpu_to_be32_array(pkg->buffer, data, len / 4); *(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len); @@ -384,6 +389,7 @@ static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len, static bool tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type, struct ctl_pkg *pkg, size_t size) { + trace_tb_event(type, pkg->buffer, size); return ctl->callback(ctl->callback_data, type, pkg->buffer, size); } @@ -489,6 +495,9 @@ static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame, * triggered from messing with the active requests. 
*/ req = tb_cfg_request_find(pkg->ctl, pkg); + + trace_tb_rx(frame->eof, pkg->buffer, frame->size, !req); + if (req) { if (req->copy(req, pkg)) schedule_work(&req->work); diff --git a/drivers/thunderbolt/trace.h b/drivers/thunderbolt/trace.h new file mode 100644 index 0000000000000..47490cfcceceb --- /dev/null +++ b/drivers/thunderbolt/trace.h @@ -0,0 +1,183 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Thunderbolt tracing support + * + * Copyright (C) 2023, Intel Corporation + * Author: Mika Westerberg + * Gil Fine + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM thunderbolt + +#if !defined(TB_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) +#define TB_TRACE_H_ + +#include +#include + +#include "tb_msgs.h" + +#define tb_cfg_type_name(type) { type, #type } +#define show_type_name(val) \ + __print_symbolic(val, \ + tb_cfg_type_name(TB_CFG_PKG_READ), \ + tb_cfg_type_name(TB_CFG_PKG_WRITE), \ + tb_cfg_type_name(TB_CFG_PKG_ERROR), \ + tb_cfg_type_name(TB_CFG_PKG_NOTIFY_ACK), \ + tb_cfg_type_name(TB_CFG_PKG_EVENT), \ + tb_cfg_type_name(TB_CFG_PKG_XDOMAIN_REQ), \ + tb_cfg_type_name(TB_CFG_PKG_XDOMAIN_RESP), \ + tb_cfg_type_name(TB_CFG_PKG_OVERRIDE), \ + tb_cfg_type_name(TB_CFG_PKG_RESET), \ + tb_cfg_type_name(TB_CFG_PKG_ICM_EVENT), \ + tb_cfg_type_name(TB_CFG_PKG_ICM_CMD), \ + tb_cfg_type_name(TB_CFG_PKG_ICM_RESP)) + +#ifndef TB_TRACE_HELPERS +#define TB_TRACE_HELPERS +static inline const char *show_data_read_write(struct trace_seq *p, + const u32 *data) +{ + const struct cfg_read_pkg *msg = (const struct cfg_read_pkg *)data; + const char *ret = trace_seq_buffer_ptr(p); + + trace_seq_printf(p, "offset=%#x len=%u port=%d config=%#x seq=%d ", + msg->addr.offset, msg->addr.length, msg->addr.port, + msg->addr.space, msg->addr.seq); + + return ret; +} + +static inline const char *show_data_error(struct trace_seq *p, const u32 *data) +{ + const struct cfg_error_pkg *msg = (const struct cfg_error_pkg *)data; + const char *ret = trace_seq_buffer_ptr(p); + + trace_seq_printf(p, "error=%#x port=%d plug=%#x ", msg->error, + msg->port, msg->pg); + + return ret; +} + +static inline const char *show_data_event(struct trace_seq *p, const u32 *data) +{ + const struct cfg_event_pkg *msg = (const struct cfg_event_pkg *)data; + const char *ret = trace_seq_buffer_ptr(p); + + trace_seq_printf(p, "port=%d unplug=%#x ", msg->port, msg->unplug); + + return ret; +} + +static inline const char *show_route(struct trace_seq *p, const u32 *data) +{ + const struct tb_cfg_header *header = (const struct tb_cfg_header *)data; + const char *ret = trace_seq_buffer_ptr(p); + + trace_seq_printf(p, "route=%llx ", tb_cfg_get_route(header)); + + return ret; +} + +static inline const char *show_data(struct trace_seq *p, u8 type, + const u32 *data, u32 length) +{ + const char *ret = trace_seq_buffer_ptr(p); + const char *prefix = ""; + int i; + + show_route(p, data); + + switch (type) { + case TB_CFG_PKG_READ: + case TB_CFG_PKG_WRITE: + show_data_read_write(p, data); + break; + + case TB_CFG_PKG_ERROR: + show_data_error(p, data); + break; + + case TB_CFG_PKG_EVENT: + show_data_event(p, data); + break; + + default: + break; + } + + trace_seq_printf(p, "data=["); + for (i = 0; i < length; i++) { + trace_seq_printf(p, "%s0x%08x", prefix, data[i]); + prefix = ", "; + } + trace_seq_printf(p, "]"); + trace_seq_putc(p, 0); + + return ret; +} +#endif + +DECLARE_EVENT_CLASS(tb_raw, + TP_PROTO(u8 type, const void *data, size_t size), + TP_ARGS(type, data, size), + TP_STRUCT__entry( + __field(u8, type) + __field(size_t, size) + 
__dynamic_array(u32, data, size / 4) + ), + TP_fast_assign( + __entry->type = type; + __entry->size = size / 4; + memcpy(__get_dynamic_array(data), data, size); + ), + TP_printk("type=%s size=%zd %s", + show_type_name(__entry->type), __entry->size, + show_data(p, __entry->type, __get_dynamic_array(data), + __entry->size) + ) +); + +DEFINE_EVENT(tb_raw, tb_tx, + TP_PROTO(u8 type, const void *data, size_t size), + TP_ARGS(type, data, size) +); + +DEFINE_EVENT(tb_raw, tb_event, + TP_PROTO(u8 type, const void *data, size_t size), + TP_ARGS(type, data, size) +); + +TRACE_EVENT(tb_rx, + TP_PROTO(u8 type, const void *data, size_t size, bool dropped), + TP_ARGS(type, data, size, dropped), + TP_STRUCT__entry( + __field(u8, type) + __field(size_t, size) + __dynamic_array(u32, data, size / 4) + __field(bool, dropped) + ), + TP_fast_assign( + __entry->type = type; + __entry->size = size / 4; + memcpy(__get_dynamic_array(data), data, size); + __entry->dropped = dropped; + ), + TP_printk("type=%s dropped=%u size=%zd %s", + show_type_name(__entry->type), __entry->dropped, __entry->size, + show_data(p, __entry->type, __get_dynamic_array(data), + __entry->size) + ) +); + +#endif /* TB_TRACE_H_ */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . + +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace + +/* This part must be outside protection */ +#include From patchwork Tue Jan 23 13:26:33 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: [24/44] HACK: thunderbolt: Add scheduling delay to the trace From: Imre Deak X-Patchwork-Id: 575612 Message-Id: <20240123132653.413364-25-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:33 +0200 From: Mika Westerberg Not-Signed-off-by: Mika Westerberg --- drivers/thunderbolt/ctl.c | 4 +++- drivers/thunderbolt/nhi.c | 35 +++++++++++++++++++++++++++++++++++ drivers/thunderbolt/trace.h | 12 ++++++++---- include/linux/thunderbolt.h | 4 ++++ 4 files changed, 50 insertions(+), 5 deletions(-) diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c index 95f296ca44cba..82af8cb97e3c9 100644 --- a/drivers/thunderbolt/ctl.c +++ b/drivers/thunderbolt/ctl.c @@ -434,6 +434,7 @@ static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame, { struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame); struct tb_cfg_request *req; + unsigned long delay; __be32 crc32; if (canceled) @@ -496,7 +497,8 @@ static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame, */ req = tb_cfg_request_find(pkg->ctl, pkg); - trace_tb_rx(frame->eof, pkg->buffer, frame->size, !req); + delay = (unsigned long)ktime_us_delta(ktime_get(), frame->received); + trace_tb_rx(frame->eof, pkg->buffer, frame->size, !req, delay); if (req) { if (req->copy(req, pkg)) diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c index e8a4623dc5319..3877fa47ddf0a 100644 --- a/drivers/thunderbolt/nhi.c +++ b/drivers/thunderbolt/nhi.c @@ -295,6 +295,16 @@ static void ring_work(struct work_struct *work) frame = list_first_entry(&ring->in_flight, typeof(*frame), list); list_move_tail(&frame->list, &done); + + if (!frame->received) { + /* + * Frame completed just after we added the + * timestamps so use the timestamp of the last + * interrupt. 
+ */ + frame->received = ring->last_interrupt; + } + if (!ring->is_tx) { frame->size = ring->descriptors[ring->tail].length; frame->eof = ring->descriptors[ring->tail].eof; @@ -327,6 +337,7 @@ int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame) spin_lock_irqsave(&ring->lock, flags); if (ring->running) { + frame->received = 0; list_add_tail(&frame->list, &ring->queue); ring_write_descriptors(ring); } else { @@ -393,12 +404,36 @@ static void __ring_interrupt_mask(struct tb_ring *ring, bool mask) iowrite32(val, ring->nhi->iobase + reg); } +/* timestamp all completed frames */ +static void ring_timestamp_completed(struct tb_ring *ring) +{ + struct ring_frame *frame; + int tail = ring->tail; + + ring->last_interrupt = ktime_get(); + + list_for_each_entry(frame, &ring->in_flight, list) { + if (!(ring->descriptors[tail].flags + & RING_DESC_COMPLETED)) + break; + + if (!frame->received) + frame->received = ring->last_interrupt; + + tail = (tail + 1) % ring->size; + if (tail == ring->head) + break; + } +} + /* Both @nhi->lock and @ring->lock should be held */ static void __ring_interrupt(struct tb_ring *ring) { if (!ring->running) return; + ring_timestamp_completed(ring); + if (ring->start_poll) { __ring_interrupt_mask(ring, true); ring->start_poll(ring->poll_data); diff --git a/drivers/thunderbolt/trace.h b/drivers/thunderbolt/trace.h index 47490cfcceceb..ec6b19a4ee1f1 100644 --- a/drivers/thunderbolt/trace.h +++ b/drivers/thunderbolt/trace.h @@ -150,22 +150,26 @@ DEFINE_EVENT(tb_raw, tb_event, ); TRACE_EVENT(tb_rx, - TP_PROTO(u8 type, const void *data, size_t size, bool dropped), - TP_ARGS(type, data, size, dropped), + TP_PROTO(u8 type, const void *data, size_t size, bool dropped, + unsigned long delay), + TP_ARGS(type, data, size, dropped, delay), TP_STRUCT__entry( __field(u8, type) __field(size_t, size) __dynamic_array(u32, data, size / 4) __field(bool, dropped) + __field(unsigned long, delay) ), TP_fast_assign( __entry->type = type; __entry->size = size / 4; memcpy(__get_dynamic_array(data), data, size); __entry->dropped = dropped; + __entry->delay = delay; ), - TP_printk("type=%s dropped=%u size=%zd %s", - show_type_name(__entry->type), __entry->dropped, __entry->size, + TP_printk("type=%s dropped=%u size=%zd delay=%lu %s", + show_type_name(__entry->type), + __entry->dropped, __entry->size, __entry->delay, show_data(p, __entry->type, __get_dynamic_array(data), __entry->size) ) diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h index 2c835e5c41f63..2885ca1ded2e6 100644 --- a/include/linux/thunderbolt.h +++ b/include/linux/thunderbolt.h @@ -528,6 +528,7 @@ struct tb_nhi { * @start_poll: Called when ring interrupt is triggered to start * polling. Passing %NULL keeps the ring in interrupt mode. * @poll_data: Data passed to @start_poll + * @last_interrupt: Time when last interrupt occured. 
*/ struct tb_ring { spinlock_t lock; @@ -551,6 +552,7 @@ struct tb_ring { u16 eof_mask; void (*start_poll)(void *data); void *poll_data; + ktime_t last_interrupt; }; /* Leave ring interrupt enabled on suspend */ @@ -590,6 +592,7 @@ enum ring_desc_flags { * @flags: Flags for the frame (see &enum ring_desc_flags) * @eof: End of frame protocol defined field * @sof: Start of frame protocol defined field + * @received: Time when the frame was received from hardware */ struct ring_frame { dma_addr_t buffer_phy; @@ -599,6 +602,7 @@ struct ring_frame { u32 flags:12; u32 eof:4; u32 sof:4; + ktime_t received; }; /* Minimum size for ring_rx */ From patchwork Tue Jan 23 13:26:34 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: [25/44] drm/dp: Add drm_dp_max_dprx_data_rate() From: Imre Deak X-Patchwork-Id: 575613 Message-Id: <20240123132653.413364-26-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:34 +0200 Copy intel_dp_max_data_rate() to DRM core. It will be needed by a follow-up DP tunnel patch, checking the maximum rate the DPRX (sink) supports. Accordingly use the drm_dp_max_dprx_data_rate() name for clarity. This patchset will also switch calling the new DRM function in i915 instead of intel_dp_max_data_rate(). Signed-off-by: Imre Deak --- drivers/gpu/drm/display/drm_dp_helper.c | 58 +++++++++++++++++++++++++ include/drm/display/drm_dp_helper.h | 2 + 2 files changed, 60 insertions(+) diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c index b1ca3a1100dab..24911243d4d3a 100644 --- a/drivers/gpu/drm/display/drm_dp_helper.c +++ b/drivers/gpu/drm/display/drm_dp_helper.c @@ -4058,3 +4058,61 @@ int drm_dp_bw_channel_coding_efficiency(bool is_uhbr) return 800000; } EXPORT_SYMBOL(drm_dp_bw_channel_coding_efficiency); + +/* + * Given a link rate and lanes, get the data bandwidth. + * + * Data bandwidth is the actual payload rate, which depends on the data + * bandwidth efficiency and the link rate. + * + * For 8b/10b channel encoding, SST and non-FEC, the data bandwidth efficiency + * is 80%. For example, for a 1.62 Gbps link, 1.62*10^9 bps * 0.80 * (1/8) = + * 162000 kBps. With 8-bit symbols, we have 162000 kHz symbol clock. Just by + * coincidence, the port clock in kHz matches the data bandwidth in kBps, and + * they equal the link bit rate in Gbps multiplied by 100000. (Note that this no + * longer holds for data bandwidth as soon as FEC or MST is taken into account!) + * + * For 128b/132b channel encoding, the data bandwidth efficiency is 96.71%. For + * example, for a 10 Gbps link, 10*10^9 bps * 0.9671 * (1/8) = 1208875 + * kBps. With 32-bit symbols, we have 312500 kHz symbol clock. The value 1000000 + * does not match the symbol clock, the port clock (not even if you think in + * terms of a byte clock), nor the data bandwidth. It only matches the link bit + * rate in units of 10000 bps. + * + * Note that protocol layers above the DPRX link level considered here can + * further limit the maximum data rate. Such layers are the MST topology (with + * limits on the link between the source and first branch device as well as on + * the whole MST path until the DPRX link) and (Thunderbolt) DP tunnels - + * which in turn can encapsulate an MST link with its own limit - with each + * SST or MST encapsulated tunnel sharing the BW of a tunnel group. + * + * TODO: Add support for querying the max data rate with the above limits as + * well. 
+ * + * Returns the maximum data rate in kBps units. + */ +int drm_dp_max_dprx_data_rate(int max_link_rate, int max_lanes) +{ + int ch_coding_efficiency = + drm_dp_bw_channel_coding_efficiency(drm_dp_is_uhbr_rate(max_link_rate)); + int max_link_rate_kbps = max_link_rate * 10; + + /* + * UHBR rates always use 128b/132b channel encoding, and have + * 96.71% data bandwidth efficiency. Consider max_link_rate the + * link bit rate in units of 10000 bps. + */ + /* + * Lower than UHBR rates always use 8b/10b channel encoding, and have + * 80% data bandwidth efficiency for SST non-FEC. However, this turns + * out to be a nop by coincidence: + * + * int max_link_rate_kbps = max_link_rate * 10; + * max_link_rate_kbps = DIV_ROUND_DOWN_ULL(max_link_rate_kbps * 8, 10); + * max_link_rate = max_link_rate_kbps / 8; + */ + return DIV_ROUND_DOWN_ULL(mul_u32_u32(max_link_rate_kbps * max_lanes, + ch_coding_efficiency), + 1000000 * 8); +} +EXPORT_SYMBOL(drm_dp_max_dprx_data_rate); diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h index 863b2e7add29e..454ae7517419a 100644 --- a/include/drm/display/drm_dp_helper.h +++ b/include/drm/display/drm_dp_helper.h @@ -813,4 +813,6 @@ int drm_dp_bw_overhead(int lane_count, int hactive, int bpp_x16, unsigned long flags); int drm_dp_bw_channel_coding_efficiency(bool is_uhbr); +int drm_dp_max_dprx_data_rate(int max_link_rate, int max_lanes); + #endif /* _DRM_DP_HELPER_H_ */ From patchwork Tue Jan 23 13:26:35 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit Subject: [26/44] drm/dp: Add support for DP tunneling From: Imre Deak X-Patchwork-Id: 575592 Message-Id: <20240123132653.413364-27-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:35 +0200 Add support for DisplayPort tunneling. For now this includes support for Bandwidth Allocation Mode (BWA), leaving Panel Replay support for later. BWA allows displays that share the same (Thunderbolt) link to be used at their maximum resolution. At the moment this may not be possible due to the coarse granularity of partitioning the link BW among the displays on the link: the BW allocation policy is in a SW/FW/HW component on the link (on Thunderbolt it's the SW or FW Connection Manager), independent of the driver. This policy will set the DPRX maximum rate and lane count DPCD registers the GFX driver will see (0x00000, 0x00001, 0x02200, 0x02201) based on the available link BW. The granularity of the current BW allocation policy is coarse, based on the required link rate in the 1.62Gb/s..8.1Gb/s range, and it may prevent using higher resolutions altogether: the display connected first will get a share of the link BW which corresponds to its full DPRX capability (regardless of the actual mode it uses). A subsequently connected display will only get the remaining BW, which could be well below its full capability. BWA solves the above coarse granularity issue (reducing it to a 250Mb/s..1Gb/s range) and the first-come/first-served issue by letting the driver request, for each display on a link, the BW which reflects the actual modes the displays use. This patch adds the DRM core helper functions, while a follow-up change in the patchset takes them into use in the i915 driver.
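To give an idea of the intended usage, here is a minimal, hypothetical sketch (not part of the patch) of how a driver could detect a tunnel on a connector's AUX channel and enable BW allocation mode using only the helpers exported by this patch; it assumes the driver has already created a drm_dp_tunnel_mgr elsewhere:

static struct drm_dp_tunnel *
example_detect_tunnel(struct drm_dp_tunnel_mgr *mgr, struct drm_dp_aux *aux)
{
	struct drm_dp_tunnel *tunnel;
	int err;

	/* Returns an ERR_PTR (e.g. -ENODEV) if there is no tunnel on this AUX channel. */
	tunnel = drm_dp_tunnel_detect(mgr, aux);
	if (IS_ERR(tunnel))
		return tunnel;

	/* Enable BW allocation mode; tunnels without BWA support remain usable. */
	err = drm_dp_tunnel_enable_bw_alloc(tunnel);
	if (err && err != -EOPNOTSUPP) {
		drm_dp_tunnel_destroy(tunnel);
		return ERR_PTR(err);
	}

	return tunnel;
}
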
Signed-off-by: Imre Deak --- drivers/gpu/drm/display/Kconfig | 17 + drivers/gpu/drm/display/Makefile | 2 + drivers/gpu/drm/display/drm_dp_tunnel.c | 1715 +++++++++++++++++++++++ include/drm/display/drm_dp.h | 60 + include/drm/display/drm_dp_tunnel.h | 270 ++++ 5 files changed, 2064 insertions(+) create mode 100644 drivers/gpu/drm/display/drm_dp_tunnel.c create mode 100644 include/drm/display/drm_dp_tunnel.h diff --git a/drivers/gpu/drm/display/Kconfig b/drivers/gpu/drm/display/Kconfig index 09712b88a5b83..b024a84b94c1c 100644 --- a/drivers/gpu/drm/display/Kconfig +++ b/drivers/gpu/drm/display/Kconfig @@ -17,6 +17,23 @@ config DRM_DISPLAY_DP_HELPER help DRM display helpers for DisplayPort. +config DRM_DISPLAY_DP_TUNNEL + bool + select DRM_DISPLAY_DP_HELPER + help + Enable support for DisplayPort tunnels. + +config DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE + bool "Enable debugging the DP tunnel state" + depends on REF_TRACKER + depends on DRM_DISPLAY_DP_TUNNEL + depends on DEBUG_KERNEL + depends on EXPERT + help + Enables debugging the DP tunnel manager's status. + + If in doubt, say "N". + config DRM_DISPLAY_HDCP_HELPER bool depends on DRM_DISPLAY_HELPER diff --git a/drivers/gpu/drm/display/Makefile b/drivers/gpu/drm/display/Makefile index 17ac4a1006a80..7ca61333c6696 100644 --- a/drivers/gpu/drm/display/Makefile +++ b/drivers/gpu/drm/display/Makefile @@ -8,6 +8,8 @@ drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_HELPER) += \ drm_dp_helper.o \ drm_dp_mst_topology.o \ drm_dsc_helper.o +drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_TUNNEL) += \ + drm_dp_tunnel.o drm_display_helper-$(CONFIG_DRM_DISPLAY_HDCP_HELPER) += drm_hdcp_helper.o drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_HELPER) += \ drm_hdmi_helper.o \ diff --git a/drivers/gpu/drm/display/drm_dp_tunnel.c b/drivers/gpu/drm/display/drm_dp_tunnel.c new file mode 100644 index 0000000000000..58f6330db7d9d --- /dev/null +++ b/drivers/gpu/drm/display/drm_dp_tunnel.c @@ -0,0 +1,1715 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2023 Intel Corporation + */ + +#include +#include + +#include + +#include +#include +#include +#include +#include + +#define to_group(__private_obj) \ + container_of(__private_obj, struct drm_dp_tunnel_group, base) + +#define to_group_state(__private_state) \ + container_of(__private_state, struct drm_dp_tunnel_group_state, base) + +#define is_dp_tunnel_private_obj(__obj) \ + ((__obj)->funcs == &tunnel_group_funcs) + +#define for_each_new_group_in_state(__state, __new_group_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->num_private_objs; \ + (__i)++) \ + for_each_if ((__state)->private_objs[__i].ptr && \ + is_dp_tunnel_private_obj((__state)->private_objs[__i].ptr) && \ + ((__new_group_state) = \ + to_group_state((__state)->private_objs[__i].new_state), 1)) + +#define for_each_old_group_in_state(__state, __old_group_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->num_private_objs; \ + (__i)++) \ + for_each_if ((__state)->private_objs[__i].ptr && \ + is_dp_tunnel_private_obj((__state)->private_objs[__i].ptr) && \ + ((__old_group_state) = \ + to_group_state((__state)->private_objs[__i].old_state), 1)) + +#define for_each_tunnel_in_group(__group, __tunnel) \ + list_for_each_entry(__tunnel, &(__group)->tunnels, node) + +#define for_each_tunnel_state(__group_state, __tunnel_state) \ + list_for_each_entry(__tunnel_state, &(__group_state)->tunnel_states, node) + +#define for_each_tunnel_state_safe(__group_state, __tunnel_state, __tunnel_state_tmp) \ + list_for_each_entry_safe(__tunnel_state, __tunnel_state_tmp, \ + 
&(__group_state)->tunnel_states, node) + +#define kbytes_to_mbits(__kbytes) \ + DIV_ROUND_UP((__kbytes) * 8, 1000) + +#define DPTUN_BW_ARG(__bw) ((__bw) < 0 ? (__bw) : kbytes_to_mbits(__bw)) + +#define __tun_prn(__tunnel, __level, __type, __fmt, ...) \ + drm_##__level##__type((__tunnel)->group->mgr->dev, \ + "[DPTUN %s][%s] " __fmt, \ + drm_dp_tunnel_name(__tunnel), \ + (__tunnel)->aux->name, ## \ + __VA_ARGS__) + +#define tun_dbg(__tunnel, __fmt, ...) \ + __tun_prn(__tunnel, dbg, _kms, __fmt, ## __VA_ARGS__) + +#define tun_dbg_stat(__tunnel, __err, __fmt, ...) do { \ + if (__err) \ + __tun_prn(__tunnel, dbg, _kms, __fmt " (Failed, err: %pe)\n", \ + ## __VA_ARGS__, ERR_PTR(__err)); \ + else \ + __tun_prn(__tunnel, dbg, _kms, __fmt " (Ok)\n", \ + ## __VA_ARGS__); \ +} while (0) + +#define tun_dbg_atomic(__tunnel, __fmt, ...) \ + __tun_prn(__tunnel, dbg, _atomic, __fmt, ## __VA_ARGS__) + +#define tun_grp_dbg(__group, __fmt, ...) \ + drm_dbg_kms((__group)->mgr->dev, \ + "[DPTUN %s] " __fmt, \ + drm_dp_tunnel_group_name(__group), ## \ + __VA_ARGS__) + +#define DP_TUNNELING_BASE DP_TUNNELING_OUI + +#define __DPTUN_REG_RANGE(start, size) \ + GENMASK_ULL(start + size - 1, start) + +#define DPTUN_REG_RANGE(addr, size) \ + __DPTUN_REG_RANGE((addr) - DP_TUNNELING_BASE, size) + +#define DPTUN_REG(addr) DPTUN_REG_RANGE(addr, 1) + +#define DPTUN_INFO_REG_MASK ( \ + DPTUN_REG_RANGE(DP_TUNNELING_OUI, DP_TUNNELING_OUI_BYTES) | \ + DPTUN_REG_RANGE(DP_TUNNELING_DEV_ID, DP_TUNNELING_DEV_ID_BYTES) | \ + DPTUN_REG(DP_TUNNELING_HW_REV) | \ + DPTUN_REG(DP_TUNNELING_SW_REV_MAJOR) | \ + DPTUN_REG(DP_TUNNELING_SW_REV_MINOR) | \ + DPTUN_REG(DP_TUNNELING_CAPABILITIES) | \ + DPTUN_REG(DP_IN_ADAPTER_INFO) | \ + DPTUN_REG(DP_USB4_DRIVER_ID) | \ + DPTUN_REG(DP_USB4_DRIVER_BW_CAPABILITY) | \ + DPTUN_REG(DP_IN_ADAPTER_TUNNEL_INFORMATION) | \ + DPTUN_REG(DP_BW_GRANULARITY) | \ + DPTUN_REG(DP_ESTIMATED_BW) | \ + DPTUN_REG(DP_ALLOCATED_BW) | \ + DPTUN_REG(DP_TUNNELING_MAX_LINK_RATE) | \ + DPTUN_REG(DP_TUNNELING_MAX_LANE_COUNT) | \ + DPTUN_REG(DP_DPTX_BW_ALLOCATION_MODE_CONTROL)) + +static const DECLARE_BITMAP(dptun_info_regs, 64) = { + DPTUN_INFO_REG_MASK & -1UL, +#if BITS_PER_LONG == 32 + DPTUN_INFO_REG_MASK >> 32, +#endif +}; + +struct drm_dp_tunnel_regs { + u8 buf[HWEIGHT64(DPTUN_INFO_REG_MASK)]; +}; + +struct drm_dp_tunnel_group; + +struct drm_dp_tunnel { + struct drm_dp_tunnel_group *group; + + struct list_head node; + + struct kref kref; +#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE + struct ref_tracker *tracker; +#endif + struct drm_dp_aux *aux; + char name[8]; + + int bw_granularity; + int estimated_bw; + int allocated_bw; + + int max_dprx_rate; + u8 max_dprx_lane_count; + + u8 adapter_id; + + bool bw_alloc_supported:1; + bool bw_alloc_enabled:1; + bool has_io_error:1; + bool destroyed:1; +}; + +struct drm_dp_tunnel_group_state; + +struct drm_dp_tunnel_state { + struct drm_dp_tunnel_group_state *group_state; + + struct drm_dp_tunnel_ref tunnel_ref; + + struct list_head node; + + u32 stream_mask; + int *stream_bw; +}; + +struct drm_dp_tunnel_group_state { + struct drm_private_state base; + + struct list_head tunnel_states; +}; + +struct drm_dp_tunnel_group { + struct drm_private_obj base; + struct drm_dp_tunnel_mgr *mgr; + + struct list_head tunnels; + + int available_bw; /* available BW including the allocated_bw of all tunnels */ + int drv_group_id; + + char name[8]; + + bool active:1; +}; + +struct drm_dp_tunnel_mgr { + struct drm_device *dev; + + int group_count; + struct drm_dp_tunnel_group *groups; + 
wait_queue_head_t bw_req_queue; + +#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE + struct ref_tracker_dir ref_tracker; +#endif +}; + +static int next_reg_area(int *offset) +{ + *offset = find_next_bit(dptun_info_regs, 64, *offset); + + return find_next_zero_bit(dptun_info_regs, 64, *offset + 1) - *offset; +} + +#define tunnel_reg_ptr(__regs, __address) ({ \ + WARN_ON(!test_bit((__address) - DP_TUNNELING_BASE, dptun_info_regs)); \ + &(__regs)->buf[bitmap_weight(dptun_info_regs, (__address) - DP_TUNNELING_BASE)]; \ +}) + +static int read_tunnel_regs(struct drm_dp_aux *aux, struct drm_dp_tunnel_regs *regs) +{ + int offset = 0; + int len; + + while ((len = next_reg_area(&offset))) { + int address = DP_TUNNELING_BASE + offset; + + if (drm_dp_dpcd_read(aux, address, tunnel_reg_ptr(regs, address), len) < 0) + return -EIO; + + offset += len; + } + + return 0; +} + +static u8 tunnel_reg(const struct drm_dp_tunnel_regs *regs, int address) +{ + return *tunnel_reg_ptr(regs, address); +} + +static int tunnel_reg_drv_group_id(const struct drm_dp_tunnel_regs *regs) +{ + int drv_id = tunnel_reg(regs, DP_USB4_DRIVER_ID) & DP_USB4_DRIVER_ID_MASK; + int group_id = tunnel_reg(regs, DP_IN_ADAPTER_TUNNEL_INFORMATION) & DP_GROUP_ID_MASK; + + if (!group_id) + return 0; + + return (drv_id << DP_GROUP_ID_BITS) | group_id; +} + +/* Return granularity in kB/s units */ +static int tunnel_reg_bw_granularity(const struct drm_dp_tunnel_regs *regs) +{ + int gr = tunnel_reg(regs, DP_BW_GRANULARITY) & DP_BW_GRANULARITY_MASK; + + WARN_ON(gr > 2); + + return (250000 << gr) / 8; +} + +static int tunnel_reg_max_dprx_rate(const struct drm_dp_tunnel_regs *regs) +{ + u8 bw_code = tunnel_reg(regs, DP_TUNNELING_MAX_LINK_RATE); + + return drm_dp_bw_code_to_link_rate(bw_code); +} + +static int tunnel_reg_max_dprx_lane_count(const struct drm_dp_tunnel_regs *regs) +{ + u8 lane_count = tunnel_reg(regs, DP_TUNNELING_MAX_LANE_COUNT) & + DP_TUNNELING_MAX_LANE_COUNT_MASK; + + return lane_count; +} + +static bool tunnel_reg_bw_alloc_supported(const struct drm_dp_tunnel_regs *regs) +{ + u8 cap_mask = DP_TUNNELING_SUPPORT | DP_IN_BW_ALLOCATION_MODE_SUPPORT; + + if ((tunnel_reg(regs, DP_TUNNELING_CAPABILITIES) & cap_mask) != cap_mask) + return false; + + return tunnel_reg(regs, DP_USB4_DRIVER_BW_CAPABILITY) & + DP_USB4_DRIVER_BW_ALLOCATION_MODE_SUPPORT; +} + +static bool tunnel_reg_bw_alloc_enabled(const struct drm_dp_tunnel_regs *regs) +{ + return tunnel_reg(regs, DP_DPTX_BW_ALLOCATION_MODE_CONTROL) & + DP_DISPLAY_DRIVER_BW_ALLOCATION_MODE_ENABLE; +} + +static int tunnel_group_drv_id(int drv_group_id) +{ + return drv_group_id >> DP_GROUP_ID_BITS; +} + +static int tunnel_group_id(int drv_group_id) +{ + return drv_group_id & DP_GROUP_ID_MASK; +} + +const char *drm_dp_tunnel_name(const struct drm_dp_tunnel *tunnel) +{ + return tunnel->name; +} +EXPORT_SYMBOL(drm_dp_tunnel_name); + +static const char *drm_dp_tunnel_group_name(const struct drm_dp_tunnel_group *group) +{ + return group->name; +} + +static struct drm_dp_tunnel_group * +lookup_or_alloc_group(struct drm_dp_tunnel_mgr *mgr, int drv_group_id) +{ + struct drm_dp_tunnel_group *group = NULL; + int i; + + for (i = 0; i < mgr->group_count; i++) { + /* + * A tunnel group with 0 group ID shouldn't have more than one + * tunnels. 
+ */ + if (tunnel_group_id(drv_group_id) && + mgr->groups[i].drv_group_id == drv_group_id) + return &mgr->groups[i]; + + if (!group && !mgr->groups[i].active) + group = &mgr->groups[i]; + } + + if (!group) { + drm_dbg_kms(mgr->dev, + "DPTUN: Can't allocate more tunnel groups\n"); + return NULL; + } + + group->drv_group_id = drv_group_id; + group->active = true; + + snprintf(group->name, sizeof(group->name), "%d:%d:*", + tunnel_group_drv_id(drv_group_id) & ((1 << DP_GROUP_ID_BITS) - 1), + tunnel_group_id(drv_group_id) & ((1 << DP_USB4_DRIVER_ID_BITS) - 1)); + + return group; +} + +static void free_group(struct drm_dp_tunnel_group *group) +{ + struct drm_dp_tunnel_mgr *mgr = group->mgr; + + if (drm_WARN_ON(mgr->dev, !list_empty(&group->tunnels))) + return; + + group->drv_group_id = 0; + group->available_bw = -1; + group->active = false; +} + +static struct drm_dp_tunnel * +tunnel_get(struct drm_dp_tunnel *tunnel) +{ + kref_get(&tunnel->kref); + + return tunnel; +} + +static void free_tunnel(struct kref *kref) +{ + struct drm_dp_tunnel *tunnel = container_of(kref, typeof(*tunnel), kref); + struct drm_dp_tunnel_group *group = tunnel->group; + + list_del(&tunnel->node); + if (list_empty(&group->tunnels)) + free_group(group); + + kfree(tunnel); +} + +static void tunnel_put(struct drm_dp_tunnel *tunnel) +{ + kref_put(&tunnel->kref, free_tunnel); +} + +#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE +static void track_tunnel_ref(struct drm_dp_tunnel *tunnel, + struct ref_tracker **tracker) +{ + ref_tracker_alloc(&tunnel->group->mgr->ref_tracker, + tracker, GFP_KERNEL); +} + +static void untrack_tunnel_ref(struct drm_dp_tunnel *tunnel, + struct ref_tracker **tracker) +{ + ref_tracker_free(&tunnel->group->mgr->ref_tracker, + tracker); +} + +struct drm_dp_tunnel * +drm_dp_tunnel_get_untracked(struct drm_dp_tunnel *tunnel) +{ + track_tunnel_ref(tunnel, NULL); + + return tunnel_get(tunnel); +} +EXPORT_SYMBOL(drm_dp_tunnel_get_untracked); + +void drm_dp_tunnel_put_untracked(struct drm_dp_tunnel *tunnel) +{ + tunnel_put(tunnel); + untrack_tunnel_ref(tunnel, NULL); +} +EXPORT_SYMBOL(drm_dp_tunnel_put_untracked); + +struct drm_dp_tunnel * +drm_dp_tunnel_get(struct drm_dp_tunnel *tunnel, + struct ref_tracker **tracker) +{ + track_tunnel_ref(tunnel, tracker); + + return tunnel_get(tunnel); +} +EXPORT_SYMBOL(drm_dp_tunnel_get); + +void drm_dp_tunnel_put(struct drm_dp_tunnel *tunnel, + struct ref_tracker **tracker) +{ + untrack_tunnel_ref(tunnel, tracker); + tunnel_put(tunnel); +} +EXPORT_SYMBOL(drm_dp_tunnel_put); +#else +#define track_tunnel_ref(tunnel, tracker) do {} while (0) +#define untrack_tunnel_ref(tunnel, tracker) do {} while (0) + +struct drm_dp_tunnel * +drm_dp_tunnel_get_untracked(struct drm_dp_tunnel *tunnel) +{ + return tunnel_get(tunnel); +} +EXPORT_SYMBOL(drm_dp_tunnel_get_untracked); + +void drm_dp_tunnel_put_untracked(struct drm_dp_tunnel *tunnel) +{ + tunnel_put(tunnel); +} +EXPORT_SYMBOL(drm_dp_tunnel_put_untracked); +#endif + +static bool add_tunnel_to_group(struct drm_dp_tunnel_mgr *mgr, + int drv_group_id, + struct drm_dp_tunnel *tunnel) +{ + struct drm_dp_tunnel_group *group = + lookup_or_alloc_group(mgr, drv_group_id); + + if (!group) + return false; + + tunnel->group = group; + list_add(&tunnel->node, &group->tunnels); + + return true; +} + +static struct drm_dp_tunnel * +create_tunnel(struct drm_dp_tunnel_mgr *mgr, + struct drm_dp_aux *aux, + const struct drm_dp_tunnel_regs *regs) +{ + int drv_group_id = tunnel_reg_drv_group_id(regs); + struct drm_dp_tunnel *tunnel; + + tunnel = 
kzalloc(sizeof(*tunnel), GFP_KERNEL); + if (!tunnel) + return NULL; + + INIT_LIST_HEAD(&tunnel->node); + + kref_init(&tunnel->kref); + + tunnel->aux = aux; + + tunnel->adapter_id = tunnel_reg(regs, DP_IN_ADAPTER_INFO) & DP_IN_ADAPTER_NUMBER_MASK; + + snprintf(tunnel->name, sizeof(tunnel->name), "%d:%d:%d", + tunnel_group_drv_id(drv_group_id) & ((1 << DP_GROUP_ID_BITS) - 1), + tunnel_group_id(drv_group_id) & ((1 << DP_USB4_DRIVER_ID_BITS) - 1), + tunnel->adapter_id & ((1 << DP_IN_ADAPTER_NUMBER_BITS) - 1)); + + tunnel->bw_granularity = tunnel_reg_bw_granularity(regs); + tunnel->allocated_bw = tunnel_reg(regs, DP_ALLOCATED_BW) * + tunnel->bw_granularity; + + tunnel->bw_alloc_supported = tunnel_reg_bw_alloc_supported(regs); + tunnel->bw_alloc_enabled = tunnel_reg_bw_alloc_enabled(regs); + + if (!add_tunnel_to_group(mgr, drv_group_id, tunnel)) { + kfree(tunnel); + + return NULL; + } + + track_tunnel_ref(tunnel, &tunnel->tracker); + + return tunnel; +} + +static void destroy_tunnel(struct drm_dp_tunnel *tunnel) +{ + untrack_tunnel_ref(tunnel, &tunnel->tracker); + tunnel_put(tunnel); +} + +void drm_dp_tunnel_set_io_error(struct drm_dp_tunnel *tunnel) +{ + tunnel->has_io_error = true; +} +EXPORT_SYMBOL(drm_dp_tunnel_set_io_error); + +static char yes_no_chr(int val) +{ + return val ? 'Y' : 'N'; +} + +#define SKIP_DPRX_CAPS_CHECK BIT(0) +#define ALLOW_ALLOCATED_BW_CHANGE BIT(1) + +static bool tunnel_regs_are_valid(struct drm_dp_tunnel_mgr *mgr, + const struct drm_dp_tunnel_regs *regs, + unsigned int flags) +{ + int drv_group_id = tunnel_reg_drv_group_id(regs); + bool check_dprx = !(flags & SKIP_DPRX_CAPS_CHECK); + bool ret = true; + + if (!tunnel_reg_bw_alloc_supported(regs)) { + if (tunnel_group_id(drv_group_id)) { + drm_dbg_kms(mgr->dev, + "DPTUN: A non-zero group ID is only allowed with BWA support\n"); + ret = false; + } + + if (tunnel_reg(regs, DP_ALLOCATED_BW)) { + drm_dbg_kms(mgr->dev, + "DPTUN: BW is allocated without BWA support\n"); + ret = false; + } + + return ret; + } + + if (!tunnel_group_id(drv_group_id)) { + drm_dbg_kms(mgr->dev, + "DPTUN: BWA support requires a non-zero group ID\n"); + ret = false; + } + + if (check_dprx && hweight8(tunnel_reg_max_dprx_lane_count(regs)) != 1) { + drm_dbg_kms(mgr->dev, + "DPTUN: Invalid DPRX lane count: %d\n", + tunnel_reg_max_dprx_lane_count(regs)); + + ret = false; + } + + if (check_dprx && !tunnel_reg_max_dprx_rate(regs)) { + drm_dbg_kms(mgr->dev, + "DPTUN: DPRX rate is 0\n"); + + ret = false; + } + + if (tunnel_reg(regs, DP_ALLOCATED_BW) > tunnel_reg(regs, DP_ESTIMATED_BW)) { + drm_dbg_kms(mgr->dev, + "DPTUN: Allocated BW %d > estimated BW %d Mb/s\n", + DPTUN_BW_ARG(tunnel_reg(regs, DP_ALLOCATED_BW) * + tunnel_reg_bw_granularity(regs)), + DPTUN_BW_ARG(tunnel_reg(regs, DP_ESTIMATED_BW) * + tunnel_reg_bw_granularity(regs))); + + ret = false; + } + + return ret; +} + +static bool tunnel_info_changes_are_valid(struct drm_dp_tunnel *tunnel, + const struct drm_dp_tunnel_regs *regs, + unsigned int flags) +{ + int new_drv_group_id = tunnel_reg_drv_group_id(regs); + bool ret = true; + + if (tunnel->bw_alloc_supported != tunnel_reg_bw_alloc_supported(regs)) { + tun_dbg(tunnel, + "BW alloc support has changed %c -> %c\n", + yes_no_chr(tunnel->bw_alloc_supported), + yes_no_chr(tunnel_reg_bw_alloc_supported(regs))); + + ret = false; + } + + if (tunnel->group->drv_group_id != new_drv_group_id) { + tun_dbg(tunnel, + "Driver/group ID has changed %d:%d:* -> %d:%d:*\n", + tunnel_group_drv_id(tunnel->group->drv_group_id), + 
tunnel_group_id(tunnel->group->drv_group_id), + tunnel_group_drv_id(new_drv_group_id), + tunnel_group_id(new_drv_group_id)); + + ret = false; + } + + if (!tunnel->bw_alloc_supported) + return ret; + + if (tunnel->bw_granularity != tunnel_reg_bw_granularity(regs)) { + tun_dbg(tunnel, + "BW granularity has changed: %d -> %d Mb/s\n", + DPTUN_BW_ARG(tunnel->bw_granularity), + DPTUN_BW_ARG(tunnel_reg_bw_granularity(regs))); + + ret = false; + } + + /* + * On some devices at least the BW alloc mode enabled status is always + * reported as 0, so skip checking that here. + */ + + if (!(flags & ALLOW_ALLOCATED_BW_CHANGE) && + tunnel->allocated_bw != + tunnel_reg(regs, DP_ALLOCATED_BW) * tunnel->bw_granularity) { + tun_dbg(tunnel, + "Allocated BW has changed: %d -> %d Mb/s\n", + DPTUN_BW_ARG(tunnel->allocated_bw), + DPTUN_BW_ARG(tunnel_reg(regs, DP_ALLOCATED_BW) * tunnel->bw_granularity)); + + ret = false; + } + + return ret; +} + +static int +read_and_verify_tunnel_regs(struct drm_dp_tunnel *tunnel, + struct drm_dp_tunnel_regs *regs, + unsigned int flags) +{ + int err; + + err = read_tunnel_regs(tunnel->aux, regs); + if (err < 0) { + drm_dp_tunnel_set_io_error(tunnel); + + return err; + } + + if (!tunnel_regs_are_valid(tunnel->group->mgr, regs, flags)) + return -EINVAL; + + if (!tunnel_info_changes_are_valid(tunnel, regs, flags)) + return -EINVAL; + + return 0; +} + +static bool update_dprx_caps(struct drm_dp_tunnel *tunnel, const struct drm_dp_tunnel_regs *regs) +{ + bool changed = false; + + if (tunnel_reg_max_dprx_rate(regs) != tunnel->max_dprx_rate) { + tunnel->max_dprx_rate = tunnel_reg_max_dprx_rate(regs); + changed = true; + } + + if (tunnel_reg_max_dprx_lane_count(regs) != tunnel->max_dprx_lane_count) { + tunnel->max_dprx_lane_count = tunnel_reg_max_dprx_lane_count(regs); + changed = true; + } + + return changed; +} + +static int dev_id_len(const u8 *dev_id, int max_len) +{ + while (max_len && dev_id[max_len - 1] == '\0') + max_len--; + + return max_len; +} + +static int get_max_dprx_bw(const struct drm_dp_tunnel *tunnel) +{ + int bw = drm_dp_max_dprx_data_rate(tunnel->max_dprx_rate, + tunnel->max_dprx_lane_count); + + return min(roundup(bw, tunnel->bw_granularity), + MAX_DP_REQUEST_BW * tunnel->bw_granularity); +} + +static int get_max_tunnel_bw(const struct drm_dp_tunnel *tunnel) +{ + return min(get_max_dprx_bw(tunnel), tunnel->group->available_bw); +} + +/** + * drm_dp_tunnel_detect - Detect DP tunnel on the link + * @mgr: Tunnel manager + * @aux: DP AUX on which the tunnel will be detected + * + * Detect if there is any DP tunnel on the link and add it to the tunnel + * group's tunnel list. + * + * Returns 0 on success, negative error code on failure. + */ +struct drm_dp_tunnel * +drm_dp_tunnel_detect(struct drm_dp_tunnel_mgr *mgr, + struct drm_dp_aux *aux) +{ + struct drm_dp_tunnel_regs regs; + struct drm_dp_tunnel *tunnel; + int err; + + err = read_tunnel_regs(aux, ®s); + if (err) + return ERR_PTR(err); + + if (!(tunnel_reg(®s, DP_TUNNELING_CAPABILITIES) & + DP_TUNNELING_SUPPORT)) + return ERR_PTR(-ENODEV); + + /* The DPRX caps are valid only after enabling BW alloc mode. 
*/ + if (!tunnel_regs_are_valid(mgr, ®s, SKIP_DPRX_CAPS_CHECK)) + return ERR_PTR(-EINVAL); + + tunnel = create_tunnel(mgr, aux, ®s); + if (!tunnel) + return ERR_PTR(-ENOMEM); + + tun_dbg(tunnel, + "OUI:%*phD DevID:%*pE Rev-HW:%d.%d SW:%d.%d PR-Sup:%c BWA-Sup:%c BWA-En:%c\n", + DP_TUNNELING_OUI_BYTES, + tunnel_reg_ptr(®s, DP_TUNNELING_OUI), + dev_id_len(tunnel_reg_ptr(®s, DP_TUNNELING_DEV_ID), DP_TUNNELING_DEV_ID_BYTES), + tunnel_reg_ptr(®s, DP_TUNNELING_DEV_ID), + (tunnel_reg(®s, DP_TUNNELING_HW_REV) & DP_TUNNELING_HW_REV_MAJOR_MASK) >> + DP_TUNNELING_HW_REV_MAJOR_SHIFT, + (tunnel_reg(®s, DP_TUNNELING_HW_REV) & DP_TUNNELING_HW_REV_MINOR_MASK) >> + DP_TUNNELING_HW_REV_MINOR_SHIFT, + tunnel_reg(®s, DP_TUNNELING_SW_REV_MAJOR), + tunnel_reg(®s, DP_TUNNELING_SW_REV_MINOR), + yes_no_chr(tunnel_reg(®s, DP_TUNNELING_CAPABILITIES) & + DP_PANEL_REPLAY_OPTIMIZATION_SUPPORT), + yes_no_chr(tunnel->bw_alloc_supported), + yes_no_chr(tunnel->bw_alloc_enabled)); + + return tunnel; +} +EXPORT_SYMBOL(drm_dp_tunnel_detect); + +/** + * drm_dp_tunnel_destroy - Destroy tunnel object + * @tunnel: Tunnel object + * + * Remove the tunnel from the tunnel topology and destroy it. + */ +int drm_dp_tunnel_destroy(struct drm_dp_tunnel *tunnel) +{ + if (drm_WARN_ON(tunnel->group->mgr->dev, tunnel->destroyed)) + return -ENODEV; + + tun_dbg(tunnel, "destroying\n"); + + tunnel->destroyed = true; + destroy_tunnel(tunnel); + + return 0; +} +EXPORT_SYMBOL(drm_dp_tunnel_destroy); + +static int check_tunnel(const struct drm_dp_tunnel *tunnel) +{ + if (tunnel->destroyed) + return -ENODEV; + + if (tunnel->has_io_error) + return -EIO; + + return 0; +} + +static int group_allocated_bw(struct drm_dp_tunnel_group *group) +{ + struct drm_dp_tunnel *tunnel; + int group_allocated_bw = 0; + + for_each_tunnel_in_group(group, tunnel) { + if (check_tunnel(tunnel) == 0 && + tunnel->bw_alloc_enabled) + group_allocated_bw += tunnel->allocated_bw; + } + + return group_allocated_bw; +} + +static int calc_group_available_bw(const struct drm_dp_tunnel *tunnel) +{ + return group_allocated_bw(tunnel->group) - + tunnel->allocated_bw + + tunnel->estimated_bw; +} + +static int update_group_available_bw(struct drm_dp_tunnel *tunnel, + const struct drm_dp_tunnel_regs *regs) +{ + struct drm_dp_tunnel *tunnel_iter; + int group_available_bw; + bool changed; + + tunnel->estimated_bw = tunnel_reg(regs, DP_ESTIMATED_BW) * tunnel->bw_granularity; + + if (calc_group_available_bw(tunnel) == tunnel->group->available_bw) + return 0; + + for_each_tunnel_in_group(tunnel->group, tunnel_iter) { + int err; + + if (tunnel_iter == tunnel) + continue; + + if (check_tunnel(tunnel_iter) != 0 || + !tunnel_iter->bw_alloc_enabled) + continue; + + err = drm_dp_dpcd_probe(tunnel_iter->aux, DP_DPCD_REV); + if (err) { + tun_dbg(tunnel_iter, + "Probe failed, assume disconnected (err %pe)\n", + ERR_PTR(err)); + drm_dp_tunnel_set_io_error(tunnel_iter); + } + } + + group_available_bw = calc_group_available_bw(tunnel); + + tun_dbg(tunnel, "Updated group available BW: %d->%d\n", + DPTUN_BW_ARG(tunnel->group->available_bw), + DPTUN_BW_ARG(group_available_bw)); + + changed = tunnel->group->available_bw != group_available_bw; + + tunnel->group->available_bw = group_available_bw; + + return changed ? 
1 : 0; +} + +static int set_bw_alloc_mode(struct drm_dp_tunnel *tunnel, bool enable) +{ + u8 mask = DP_DISPLAY_DRIVER_BW_ALLOCATION_MODE_ENABLE | DP_UNMASK_BW_ALLOCATION_IRQ; + u8 val; + + if (drm_dp_dpcd_readb(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, &val) < 0) + goto out_err; + + if (enable) + val |= mask; + else + val &= ~mask; + + if (drm_dp_dpcd_writeb(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, val) < 0) + goto out_err; + + tunnel->bw_alloc_enabled = enable; + + return 0; + +out_err: + drm_dp_tunnel_set_io_error(tunnel); + + return -EIO; +} + +/** + * drm_dp_tunnel_enable_bw_alloc: Enable DP tunnel BW allocation mode + * @tunnel: Tunnel object + * + * Enable the DP tunnel BW allocation mode on @tunnel if it supports it. + * + * Returns 0 in case of success, negative error code otherwise. + */ +int drm_dp_tunnel_enable_bw_alloc(struct drm_dp_tunnel *tunnel) +{ + struct drm_dp_tunnel_regs regs; + int err = check_tunnel(tunnel); + + if (err) + return err; + + if (!tunnel->bw_alloc_supported) + return -EOPNOTSUPP; + + if (!tunnel_group_id(tunnel->group->drv_group_id)) + return -EINVAL; + + err = set_bw_alloc_mode(tunnel, true); + if (err) + goto out; + + err = read_and_verify_tunnel_regs(tunnel, ®s, 0); + if (err) { + set_bw_alloc_mode(tunnel, false); + + goto out; + } + + if (!tunnel->max_dprx_rate) + update_dprx_caps(tunnel, ®s); + + if (tunnel->group->available_bw == -1) { + err = update_group_available_bw(tunnel, ®s); + if (err > 0) + err = 0; + } +out: + tun_dbg_stat(tunnel, err, + "Enabling BW alloc mode: DPRX:%dx%d Group alloc:%d/%d Mb/s", + tunnel->max_dprx_rate / 100, tunnel->max_dprx_lane_count, + DPTUN_BW_ARG(group_allocated_bw(tunnel->group)), + DPTUN_BW_ARG(tunnel->group->available_bw)); + + return err; +} +EXPORT_SYMBOL(drm_dp_tunnel_enable_bw_alloc); + +/** + * drm_dp_tunnel_disable_bw_alloc: Disable DP tunnel BW allocation mode + * @tunnel: Tunnel object + * + * Disable the DP tunnel BW allocation mode on @tunnel. + * + * Returns 0 in case of success, negative error code otherwise. + */ +int drm_dp_tunnel_disable_bw_alloc(struct drm_dp_tunnel *tunnel) +{ + int err = check_tunnel(tunnel); + + if (err) + return err; + + err = set_bw_alloc_mode(tunnel, false); + + tun_dbg_stat(tunnel, err, "Disabling BW alloc mode"); + + return err; +} +EXPORT_SYMBOL(drm_dp_tunnel_disable_bw_alloc); + +bool drm_dp_tunnel_bw_alloc_is_enabled(const struct drm_dp_tunnel *tunnel) +{ + return tunnel->bw_alloc_enabled; +} +EXPORT_SYMBOL(drm_dp_tunnel_bw_alloc_is_enabled); + +static int bw_req_complete(struct drm_dp_aux *aux, bool *status_changed) +{ + u8 bw_req_mask = DP_BW_REQUEST_SUCCEEDED | DP_BW_REQUEST_FAILED; + u8 status_change_mask = DP_BW_ALLOCATION_CAPABILITY_CHANGED | DP_ESTIMATED_BW_CHANGED; + u8 val; + + if (drm_dp_dpcd_readb(aux, DP_TUNNELING_STATUS, &val) < 0) + return -EIO; + + *status_changed = val & status_change_mask; + + val &= bw_req_mask; + + if (!val) + return -EAGAIN; + + if (drm_dp_dpcd_writeb(aux, DP_TUNNELING_STATUS, val) < 0) + return -EIO; + + return val == DP_BW_REQUEST_SUCCEEDED ? 0 : -ENOSPC; +} + +static int allocate_tunnel_bw(struct drm_dp_tunnel *tunnel, int bw) +{ + struct drm_dp_tunnel_mgr *mgr = tunnel->group->mgr; + int request_bw = DIV_ROUND_UP(bw, tunnel->bw_granularity); + unsigned long wait_expires; + DEFINE_WAIT(wait); + int err; + + /* Atomic check should prevent the following. 
*/ + if (drm_WARN_ON(mgr->dev, request_bw > MAX_DP_REQUEST_BW)) { + err = -EINVAL; + goto out; + } + + if (drm_dp_dpcd_writeb(tunnel->aux, DP_REQUEST_BW, request_bw) < 0) { + err = -EIO; + goto out; + } + + wait_expires = jiffies + msecs_to_jiffies(3000); + + for (;;) { + bool status_changed; + + err = bw_req_complete(tunnel->aux, &status_changed); + if (err != -EAGAIN) + break; + + if (status_changed) { + struct drm_dp_tunnel_regs regs; + + err = read_and_verify_tunnel_regs(tunnel, ®s, + ALLOW_ALLOCATED_BW_CHANGE); + if (err) + break; + } + + if (time_after(jiffies, wait_expires)) { + err = -ETIMEDOUT; + break; + } + + prepare_to_wait(&mgr->bw_req_queue, &wait, TASK_UNINTERRUPTIBLE); + schedule_timeout(msecs_to_jiffies(200)); + }; + + finish_wait(&mgr->bw_req_queue, &wait); + + if (err) + goto out; + + tunnel->allocated_bw = request_bw * tunnel->bw_granularity; + +out: + tun_dbg_stat(tunnel, err, "Allocating %d/%d Mb/s for tunnel: Group alloc:%d/%d Mb/s", + DPTUN_BW_ARG(request_bw * tunnel->bw_granularity), + DPTUN_BW_ARG(get_max_tunnel_bw(tunnel)), + DPTUN_BW_ARG(group_allocated_bw(tunnel->group)), + DPTUN_BW_ARG(tunnel->group->available_bw)); + + if (err == -EIO) + drm_dp_tunnel_set_io_error(tunnel); + + return err; +} + +int drm_dp_tunnel_alloc_bw(struct drm_dp_tunnel *tunnel, int bw) +{ + int err = check_tunnel(tunnel); + + if (err) + return err; + + return allocate_tunnel_bw(tunnel, bw); +} +EXPORT_SYMBOL(drm_dp_tunnel_alloc_bw); + +static int check_and_clear_status_change(struct drm_dp_tunnel *tunnel) +{ + u8 mask = DP_BW_ALLOCATION_CAPABILITY_CHANGED | DP_ESTIMATED_BW_CHANGED; + u8 val; + + if (drm_dp_dpcd_readb(tunnel->aux, DP_TUNNELING_STATUS, &val) < 0) + goto out_err; + + val &= mask; + + if (val) { + if (drm_dp_dpcd_writeb(tunnel->aux, DP_TUNNELING_STATUS, val) < 0) + goto out_err; + + return 1; + } + + if (!drm_dp_tunnel_bw_alloc_is_enabled(tunnel)) + return 0; + + /* + * Check for estimated BW changes explicitly to account for lost + * BW change notifications. + */ + if (drm_dp_dpcd_readb(tunnel->aux, DP_ESTIMATED_BW, &val) < 0) + goto out_err; + + if (val * tunnel->bw_granularity != tunnel->estimated_bw) + return 1; + + return 0; + +out_err: + drm_dp_tunnel_set_io_error(tunnel); + + return -EIO; +} + +/** + * drm_dp_tunnel_update_state: Update DP tunnel SW state with the HW state + * @tunnel: Tunnel object + * + * Update the SW state of @tunnel with the HW state. + * + * Returns 0 if the state has not changed, 1 if it has changed and got updated + * successfully and a negative error code otherwise. + */ +int drm_dp_tunnel_update_state(struct drm_dp_tunnel *tunnel) +{ + struct drm_dp_tunnel_regs regs; + bool changed = false; + int ret = check_tunnel(tunnel); + + if (ret < 0) + return ret; + + ret = check_and_clear_status_change(tunnel); + if (ret < 0) + goto out; + + if (!ret) + return 0; + + ret = read_and_verify_tunnel_regs(tunnel, ®s, 0); + if (ret) + goto out; + + if (update_dprx_caps(tunnel, ®s)) + changed = true; + + ret = update_group_available_bw(tunnel, ®s); + if (ret == 1) + changed = true; + +out: + tun_dbg_stat(tunnel, ret < 0 ? 
ret : 0, + "State update: Changed:%c DPRX:%dx%d Tunnel alloc:%d/%d Group alloc:%d/%d Mb/s", + yes_no_chr(changed), + tunnel->max_dprx_rate / 100, tunnel->max_dprx_lane_count, + DPTUN_BW_ARG(tunnel->allocated_bw), + DPTUN_BW_ARG(get_max_tunnel_bw(tunnel)), + DPTUN_BW_ARG(group_allocated_bw(tunnel->group)), + DPTUN_BW_ARG(tunnel->group->available_bw)); + + if (ret < 0) + return ret; + + if (changed) + return 1; + + return 0; +} +EXPORT_SYMBOL(drm_dp_tunnel_update_state); + +/* + * Returns 0 if no re-probe is needed, 1 if a re-probe is needed, + * a negative error code otherwise. + */ +int drm_dp_tunnel_handle_irq(struct drm_dp_tunnel_mgr *mgr, struct drm_dp_aux *aux) +{ + u8 val; + + if (drm_dp_dpcd_readb(aux, DP_TUNNELING_STATUS, &val) < 0) + return -EIO; + + if (val & (DP_BW_REQUEST_SUCCEEDED | DP_BW_REQUEST_FAILED)) + wake_up_all(&mgr->bw_req_queue); + + if (val & (DP_BW_ALLOCATION_CAPABILITY_CHANGED | DP_ESTIMATED_BW_CHANGED)) + return 1; + + return 0; +} +EXPORT_SYMBOL(drm_dp_tunnel_handle_irq); + +/** + * drm_dp_tunnel_max_dprx_rate - Query the maximum rate of the tunnel's DPRX + * @tunnel: Tunnel object + * + * The function is used to query the maximum link rate of the DPRX connected + * to @tunnel. Note that this rate will not be limited by the BW limit of the + * tunnel, as opposed to the standard and extended DP_MAX_LINK_RATE DPCD + * registers. + * + * Returns the maximum link rate in 10 kbit/s units. + */ +int drm_dp_tunnel_max_dprx_rate(const struct drm_dp_tunnel *tunnel) +{ + return tunnel->max_dprx_rate; +} +EXPORT_SYMBOL(drm_dp_tunnel_max_dprx_rate); + +/** + * drm_dp_tunnel_max_dprx_lane_count - Query the maximum lane count of the tunnel's DPRX + * @tunnel: Tunnel object + * + * The function is used to query the maximum lane count of the DPRX connected + * to @tunnel. Note that this lane count will not be limited by the BW limit of + * the tunnel, as opposed to the standard and extended DP_MAX_LANE_COUNT DPCD + * registers. + * + * Returns the maximum lane count. + */ +int drm_dp_tunnel_max_dprx_lane_count(const struct drm_dp_tunnel *tunnel) +{ + return tunnel->max_dprx_lane_count; +} +EXPORT_SYMBOL(drm_dp_tunnel_max_dprx_lane_count); + +/** + * drm_dp_tunnel_available_bw - Query the estimated total available BW of the tunnel + * @tunnel: Tunnel object + * + * This function is used to query the estimated total available BW of the + * tunnel. This includes the currently allocated and free BW for all the + * tunnels in @tunnel's group. The available BW is valid only after the BW + * allocation mode has been enabled for the tunnel and its state got updated + * calling drm_dp_tunnel_update_state(). + * + * Returns the @tunnel group's estimated total available bandwidth in kB/s + * units, or -1 if the available BW isn't valid (the BW allocation mode is + * not enabled or the tunnel's state hasn't been updated). 
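+ * Note that a single tunnel cannot necessarily use all of this BW on its
+ * own: each tunnel is additionally limited by its DPRX capabilities, see
+ * drm_dp_tunnel_max_dprx_rate() and drm_dp_tunnel_max_dprx_lane_count().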
+ */ +int drm_dp_tunnel_available_bw(const struct drm_dp_tunnel *tunnel) +{ + return tunnel->group->available_bw; +} +EXPORT_SYMBOL(drm_dp_tunnel_available_bw); + +static struct drm_dp_tunnel_group_state * +drm_dp_tunnel_atomic_get_group_state(struct drm_atomic_state *state, + const struct drm_dp_tunnel *tunnel) +{ + return (struct drm_dp_tunnel_group_state *) + drm_atomic_get_private_obj_state(state, + &tunnel->group->base); +} + +static struct drm_dp_tunnel_state * +add_tunnel_state(struct drm_dp_tunnel_group_state *group_state, + struct drm_dp_tunnel *tunnel) +{ + struct drm_dp_tunnel_state *tunnel_state; + + tun_dbg_atomic(tunnel, + "Adding state for tunnel %p to group state %p\n", + tunnel, group_state); + + tunnel_state = kzalloc(sizeof(*tunnel_state), GFP_KERNEL); + if (!tunnel_state) + return NULL; + + tunnel_state->group_state = group_state; + + drm_dp_tunnel_ref_get(tunnel, &tunnel_state->tunnel_ref); + + INIT_LIST_HEAD(&tunnel_state->node); + list_add(&tunnel_state->node, &group_state->tunnel_states); + + return tunnel_state; +} + +void drm_dp_tunnel_atomic_clear_state(struct drm_dp_tunnel_state *tunnel_state) +{ + tun_dbg_atomic(tunnel_state->tunnel_ref.tunnel, + "Clearing state for tunnel %p\n", + tunnel_state->tunnel_ref.tunnel); + + list_del(&tunnel_state->node); + + kfree(tunnel_state->stream_bw); + drm_dp_tunnel_ref_put(&tunnel_state->tunnel_ref); + + kfree(tunnel_state); +} +EXPORT_SYMBOL(drm_dp_tunnel_atomic_clear_state); + +static void clear_tunnel_group_state(struct drm_dp_tunnel_group_state *group_state) +{ + struct drm_dp_tunnel_state *tunnel_state; + struct drm_dp_tunnel_state *tunnel_state_tmp; + + for_each_tunnel_state_safe(group_state, tunnel_state, tunnel_state_tmp) + drm_dp_tunnel_atomic_clear_state(tunnel_state); +} + +static struct drm_dp_tunnel_state * +get_tunnel_state(struct drm_dp_tunnel_group_state *group_state, + const struct drm_dp_tunnel *tunnel) +{ + struct drm_dp_tunnel_state *tunnel_state; + + for_each_tunnel_state(group_state, tunnel_state) + if (tunnel_state->tunnel_ref.tunnel == tunnel) + return tunnel_state; + + return NULL; +} + +static struct drm_dp_tunnel_state * +get_or_add_tunnel_state(struct drm_dp_tunnel_group_state *group_state, + struct drm_dp_tunnel *tunnel) +{ + struct drm_dp_tunnel_state *tunnel_state; + + tunnel_state = get_tunnel_state(group_state, tunnel); + if (tunnel_state) + return tunnel_state; + + return add_tunnel_state(group_state, tunnel); +} + +static struct drm_private_state * +tunnel_group_duplicate_state(struct drm_private_obj *obj) +{ + struct drm_dp_tunnel_group_state *group_state = to_group_state(obj->state); + struct drm_dp_tunnel_state *tunnel_state; + + group_state = kzalloc(sizeof(*group_state), GFP_KERNEL); + if (!group_state) + return NULL; + + INIT_LIST_HEAD(&group_state->tunnel_states); + + __drm_atomic_helper_private_obj_duplicate_state(obj, &group_state->base); + + for_each_tunnel_state(to_group_state(obj->state), tunnel_state) { + struct drm_dp_tunnel_state *new_tunnel_state; + + new_tunnel_state = get_or_add_tunnel_state(group_state, + tunnel_state->tunnel_ref.tunnel); + if (!new_tunnel_state) + goto out_free_state; + + new_tunnel_state->stream_mask = tunnel_state->stream_mask; + new_tunnel_state->stream_bw = kmemdup(tunnel_state->stream_bw, + sizeof(*tunnel_state->stream_bw) * + hweight32(tunnel_state->stream_mask), + GFP_KERNEL); + + if (!new_tunnel_state->stream_bw) + goto out_free_state; + } + + return &group_state->base; + +out_free_state: + clear_tunnel_group_state(group_state); + 
kfree(group_state); + + return NULL; +} + +static void tunnel_group_destroy_state(struct drm_private_obj *obj, struct drm_private_state *state) +{ + struct drm_dp_tunnel_group_state *group_state = to_group_state(state); + + clear_tunnel_group_state(group_state); + kfree(group_state); +} + +static const struct drm_private_state_funcs tunnel_group_funcs = { + .atomic_duplicate_state = tunnel_group_duplicate_state, + .atomic_destroy_state = tunnel_group_destroy_state, +}; + +struct drm_dp_tunnel_state * +drm_dp_tunnel_atomic_get_state(struct drm_atomic_state *state, + struct drm_dp_tunnel *tunnel) +{ + struct drm_dp_tunnel_group_state *group_state = + drm_dp_tunnel_atomic_get_group_state(state, tunnel); + struct drm_dp_tunnel_state *tunnel_state; + + if (IS_ERR(group_state)) + return ERR_CAST(group_state); + + tunnel_state = get_or_add_tunnel_state(group_state, tunnel); + if (!tunnel_state) + return ERR_PTR(-ENOMEM); + + return tunnel_state; +} +EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_state); + +struct drm_dp_tunnel_state * +drm_dp_tunnel_atomic_get_new_state(struct drm_atomic_state *state, + const struct drm_dp_tunnel *tunnel) +{ + struct drm_dp_tunnel_group_state *new_group_state; + int i; + + for_each_new_group_in_state(state, new_group_state, i) + if (to_group(new_group_state->base.obj) == tunnel->group) + return get_tunnel_state(new_group_state, tunnel); + + return NULL; +} +EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_new_state); + +static bool init_group(struct drm_dp_tunnel_mgr *mgr, struct drm_dp_tunnel_group *group) +{ + struct drm_dp_tunnel_group_state *group_state = kzalloc(sizeof(*group_state), GFP_KERNEL); + + if (!group_state) + return false; + + INIT_LIST_HEAD(&group_state->tunnel_states); + + group->mgr = mgr; + group->available_bw = -1; + INIT_LIST_HEAD(&group->tunnels); + + drm_atomic_private_obj_init(mgr->dev, &group->base, &group_state->base, + &tunnel_group_funcs); + + return true; +} + +static void cleanup_group(struct drm_dp_tunnel_group *group) +{ + drm_atomic_private_obj_fini(&group->base); +} + +#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE +static void check_unique_stream_ids(const struct drm_dp_tunnel_group_state *group_state) +{ + const struct drm_dp_tunnel_state *tunnel_state; + u32 stream_mask = 0; + + for_each_tunnel_state(group_state, tunnel_state) { + drm_WARN(to_group(group_state->base.obj)->mgr->dev, + tunnel_state->stream_mask & stream_mask, + "[DPTUN %s]: conflicting stream IDs %x (IDs in other tunnels %x)\n", + tunnel_state->tunnel_ref.tunnel->name, + tunnel_state->stream_mask, + stream_mask); + + stream_mask |= tunnel_state->stream_mask; + } +} +#else +static void check_unique_stream_ids(const struct drm_dp_tunnel_group_state *group_state) +{ +} +#endif + +static int stream_id_to_idx(u32 stream_mask, u8 stream_id) +{ + return hweight32(stream_mask & (BIT(stream_id) - 1)); +} + +static int resize_bw_array(struct drm_dp_tunnel_state *tunnel_state, + unsigned long old_mask, unsigned long new_mask) +{ + unsigned long move_mask = old_mask & new_mask; + int *new_bws = NULL; + int id; + + WARN_ON(!new_mask); + + if (old_mask == new_mask) + return 0; + + new_bws = kcalloc(hweight32(new_mask), sizeof(*new_bws), GFP_KERNEL); + if (!new_bws) + return -ENOMEM; + + for_each_set_bit(id, &move_mask, BITS_PER_TYPE(move_mask)) + new_bws[stream_id_to_idx(new_mask, id)] = + tunnel_state->stream_bw[stream_id_to_idx(old_mask, id)]; + + kfree(tunnel_state->stream_bw); + tunnel_state->stream_bw = new_bws; + tunnel_state->stream_mask = new_mask; + + return 0; +} + +static int 
set_stream_bw(struct drm_dp_tunnel_state *tunnel_state, + u8 stream_id, int bw) +{ + int err; + + err = resize_bw_array(tunnel_state, + tunnel_state->stream_mask, + tunnel_state->stream_mask | BIT(stream_id)); + if (err) + return err; + + tunnel_state->stream_bw[stream_id_to_idx(tunnel_state->stream_mask, stream_id)] = bw; + + return 0; +} + +static int clear_stream_bw(struct drm_dp_tunnel_state *tunnel_state, + u8 stream_id) +{ + if (!(tunnel_state->stream_mask & ~BIT(stream_id))) { + drm_dp_tunnel_atomic_clear_state(tunnel_state); + return 0; + } + + return resize_bw_array(tunnel_state, + tunnel_state->stream_mask, + tunnel_state->stream_mask & ~BIT(stream_id)); +} + +int drm_dp_tunnel_atomic_set_stream_bw(struct drm_atomic_state *state, + struct drm_dp_tunnel *tunnel, + u8 stream_id, int bw) +{ + struct drm_dp_tunnel_group_state *new_group_state = + drm_dp_tunnel_atomic_get_group_state(state, tunnel); + struct drm_dp_tunnel_state *tunnel_state; + int err; + + if (drm_WARN_ON(tunnel->group->mgr->dev, + stream_id > BITS_PER_TYPE(tunnel_state->stream_mask))) + return -EINVAL; + + tun_dbg(tunnel, + "Setting %d Mb/s for stream %d\n", + DPTUN_BW_ARG(bw), stream_id); + + if (bw == 0) { + tunnel_state = get_tunnel_state(new_group_state, tunnel); + if (!tunnel_state) + return 0; + + return clear_stream_bw(tunnel_state, stream_id); + } + + tunnel_state = get_or_add_tunnel_state(new_group_state, tunnel); + if (drm_WARN_ON(state->dev, !tunnel_state)) + return -EINVAL; + + err = set_stream_bw(tunnel_state, stream_id, bw); + if (err) + return err; + + check_unique_stream_ids(new_group_state); + + return 0; +} +EXPORT_SYMBOL(drm_dp_tunnel_atomic_set_stream_bw); + +int drm_dp_tunnel_atomic_get_tunnel_bw(const struct drm_dp_tunnel_state *tunnel_state) +{ + int tunnel_bw = 0; + int i; + + for (i = 0; i < hweight32(tunnel_state->stream_mask); i++) + tunnel_bw += tunnel_state->stream_bw[i]; + + return tunnel_bw; +} +EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_tunnel_bw); + +int drm_dp_tunnel_atomic_get_group_streams_in_state(struct drm_atomic_state *state, + const struct drm_dp_tunnel *tunnel, + u32 *stream_mask) +{ + struct drm_dp_tunnel_group_state *group_state = + drm_dp_tunnel_atomic_get_group_state(state, tunnel); + struct drm_dp_tunnel_state *tunnel_state; + + if (IS_ERR(group_state)) + return PTR_ERR(group_state); + + *stream_mask = 0; + for_each_tunnel_state(group_state, tunnel_state) + *stream_mask |= tunnel_state->stream_mask; + + return 0; +} +EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_group_streams_in_state); + +static int +drm_dp_tunnel_atomic_check_group_bw(struct drm_dp_tunnel_group_state *new_group_state, + u32 *failed_stream_mask) +{ + struct drm_dp_tunnel_group *group = to_group(new_group_state->base.obj); + struct drm_dp_tunnel_state *new_tunnel_state; + u32 group_stream_mask = 0; + int group_bw = 0; + + for_each_tunnel_state(new_group_state, new_tunnel_state) { + struct drm_dp_tunnel *tunnel = new_tunnel_state->tunnel_ref.tunnel; + int max_dprx_bw = get_max_dprx_bw(tunnel); + int tunnel_bw = drm_dp_tunnel_atomic_get_tunnel_bw(new_tunnel_state); + + tun_dbg(tunnel, + "%sRequired %d/%d Mb/s total for tunnel.\n", + tunnel_bw > max_dprx_bw ? 
"Not enough BW: " : "", + DPTUN_BW_ARG(tunnel_bw), + DPTUN_BW_ARG(max_dprx_bw)); + + if (tunnel_bw > max_dprx_bw) { + *failed_stream_mask = new_tunnel_state->stream_mask; + return -ENOSPC; + } + + group_bw += min(roundup(tunnel_bw, tunnel->bw_granularity), + max_dprx_bw); + group_stream_mask |= new_tunnel_state->stream_mask; + } + + tun_grp_dbg(group, + "%sRequired %d/%d Mb/s total for tunnel group.\n", + group_bw > group->available_bw ? "Not enough BW: " : "", + DPTUN_BW_ARG(group_bw), + DPTUN_BW_ARG(group->available_bw)); + + if (group_bw > group->available_bw) { + *failed_stream_mask = group_stream_mask; + return -ENOSPC; + } + + return 0; +} + +int drm_dp_tunnel_atomic_check_stream_bws(struct drm_atomic_state *state, + u32 *failed_stream_mask) +{ + struct drm_dp_tunnel_group_state *new_group_state; + int i; + + for_each_new_group_in_state(state, new_group_state, i) { + int ret; + + ret = drm_dp_tunnel_atomic_check_group_bw(new_group_state, + failed_stream_mask); + if (ret) + return ret; + } + + return 0; +} +EXPORT_SYMBOL(drm_dp_tunnel_atomic_check_stream_bws); + +static void destroy_mgr(struct drm_dp_tunnel_mgr *mgr) +{ + int i; + + for (i = 0; i < mgr->group_count; i++) { + cleanup_group(&mgr->groups[i]); + drm_WARN_ON(mgr->dev, !list_empty(&mgr->groups[i].tunnels)); + } + +#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE + ref_tracker_dir_exit(&mgr->ref_tracker); +#endif + + kfree(mgr->groups); + kfree(mgr); +} + +/** + * drm_dp_tunnel_mgr_create - Create a DP tunnel manager + * @i915: i915 driver object + * + * Creates a DP tunnel manager. + * + * Returns a pointer to the tunnel manager if created successfully or NULL in + * case of an error. + */ +struct drm_dp_tunnel_mgr * +drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count) +{ + struct drm_dp_tunnel_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); + int i; + + if (!mgr) + return NULL; + + mgr->dev = dev; + init_waitqueue_head(&mgr->bw_req_queue); + + mgr->groups = kcalloc(max_group_count, sizeof(*mgr->groups), GFP_KERNEL); + if (!mgr->groups) { + kfree(mgr); + + return NULL; + } + +#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE + ref_tracker_dir_init(&mgr->ref_tracker, 16, "dptun"); +#endif + + for (i = 0; i < max_group_count; i++) { + if (!init_group(mgr, &mgr->groups[i])) { + destroy_mgr(mgr); + + return NULL; + } + + mgr->group_count++; + } + + return mgr; +} +EXPORT_SYMBOL(drm_dp_tunnel_mgr_create); + +/** + * drm_dp_tunnel_mgr_destroy - Destroy DP tunnel manager + * @mgr: Tunnel manager object + * + * Destroy the tunnel manager. 
+ */ +void drm_dp_tunnel_mgr_destroy(struct drm_dp_tunnel_mgr *mgr) +{ + destroy_mgr(mgr); +} +EXPORT_SYMBOL(drm_dp_tunnel_mgr_destroy); diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h index 281afff6ee4e5..8bfd5d007be8d 100644 --- a/include/drm/display/drm_dp.h +++ b/include/drm/display/drm_dp.h @@ -1382,6 +1382,66 @@ #define DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET 0x69494 #define DP_HDCP_2_2_REG_DBG_OFFSET 0x69518 +/* DP-tunneling */ +#define DP_TUNNELING_OUI 0xe0000 +#define DP_TUNNELING_OUI_BYTES 3 + +#define DP_TUNNELING_DEV_ID 0xe0003 +#define DP_TUNNELING_DEV_ID_BYTES 6 + +#define DP_TUNNELING_HW_REV 0xe0009 +#define DP_TUNNELING_HW_REV_MAJOR_SHIFT 4 +#define DP_TUNNELING_HW_REV_MAJOR_MASK (0xf << DP_TUNNELING_HW_REV_MAJOR_SHIFT) +#define DP_TUNNELING_HW_REV_MINOR_SHIFT 0 +#define DP_TUNNELING_HW_REV_MINOR_MASK (0xf << DP_TUNNELING_HW_REV_MINOR_SHIFT) + +#define DP_TUNNELING_SW_REV_MAJOR 0xe000a +#define DP_TUNNELING_SW_REV_MINOR 0xe000b + +#define DP_TUNNELING_CAPABILITIES 0xe000d +#define DP_IN_BW_ALLOCATION_MODE_SUPPORT (1 << 7) +#define DP_PANEL_REPLAY_OPTIMIZATION_SUPPORT (1 << 6) +#define DP_TUNNELING_SUPPORT (1 << 0) + +#define DP_IN_ADAPTER_INFO 0xe000e +#define DP_IN_ADAPTER_NUMBER_BITS 7 +#define DP_IN_ADAPTER_NUMBER_MASK ((1 << DP_IN_ADAPTER_NUMBER_BITS) - 1) + +#define DP_USB4_DRIVER_ID 0xe000f +#define DP_USB4_DRIVER_ID_BITS 4 +#define DP_USB4_DRIVER_ID_MASK ((1 << DP_USB4_DRIVER_ID_BITS) - 1) + +#define DP_USB4_DRIVER_BW_CAPABILITY 0xe0020 +#define DP_USB4_DRIVER_BW_ALLOCATION_MODE_SUPPORT (1 << 7) + +#define DP_IN_ADAPTER_TUNNEL_INFORMATION 0xe0021 +#define DP_GROUP_ID_BITS 3 +#define DP_GROUP_ID_MASK ((1 << DP_GROUP_ID_BITS) - 1) + +#define DP_BW_GRANULARITY 0xe0022 +#define DP_BW_GRANULARITY_MASK 0x3 + +#define DP_ESTIMATED_BW 0xe0023 +#define DP_ALLOCATED_BW 0xe0024 + +#define DP_TUNNELING_STATUS 0xe0025 +#define DP_BW_ALLOCATION_CAPABILITY_CHANGED (1 << 3) +#define DP_ESTIMATED_BW_CHANGED (1 << 2) +#define DP_BW_REQUEST_SUCCEEDED (1 << 1) +#define DP_BW_REQUEST_FAILED (1 << 0) + +#define DP_TUNNELING_MAX_LINK_RATE 0xe0028 + +#define DP_TUNNELING_MAX_LANE_COUNT 0xe0029 +#define DP_TUNNELING_MAX_LANE_COUNT_MASK 0x1f + +#define DP_DPTX_BW_ALLOCATION_MODE_CONTROL 0xe0030 +#define DP_DISPLAY_DRIVER_BW_ALLOCATION_MODE_ENABLE (1 << 7) +#define DP_UNMASK_BW_ALLOCATION_IRQ (1 << 6) + +#define DP_REQUEST_BW 0xe0031 +#define MAX_DP_REQUEST_BW 255 + /* LTTPR: Link Training (LT)-tunable PHY Repeaters */ #define DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV 0xf0000 /* 1.3 */ #define DP_MAX_LINK_RATE_PHY_REPEATER 0xf0001 /* 1.4a */ diff --git a/include/drm/display/drm_dp_tunnel.h b/include/drm/display/drm_dp_tunnel.h new file mode 100644 index 0000000000000..f6449b1b4e6e9 --- /dev/null +++ b/include/drm/display/drm_dp_tunnel.h @@ -0,0 +1,270 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef __DRM_DP_TUNNEL_H__ +#define __DRM_DP_TUNNEL_H__ + +#include +#include +#include + +struct drm_dp_aux; + +struct drm_device; + +struct drm_atomic_state; +struct drm_dp_tunnel_mgr; +struct drm_dp_tunnel_state; + +struct ref_tracker; + +struct drm_dp_tunnel_ref { + struct drm_dp_tunnel *tunnel; +#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE + struct ref_tracker *tracker; +#endif +}; + +#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL + +struct drm_dp_tunnel * +drm_dp_tunnel_get_untracked(struct drm_dp_tunnel *tunnel); +void drm_dp_tunnel_put_untracked(struct drm_dp_tunnel *tunnel); + +#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE 
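+/*
+ * With CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE enabled the get/put calls
+ * below record their references in a ref_tracker; otherwise they simply
+ * alias the untracked variants above.
+ */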
+struct drm_dp_tunnel * +drm_dp_tunnel_get(struct drm_dp_tunnel *tunnel, struct ref_tracker **tracker); + +void +drm_dp_tunnel_put(struct drm_dp_tunnel *tunnel, struct ref_tracker **tracker); +#else +#define drm_dp_tunnel_get(tunnel, tracker) \ + drm_dp_tunnel_get_untracked(tunnel) + +#define drm_dp_tunnel_put(tunnel, tracker) \ + drm_dp_tunnel_put_untracked(tunnel) + +#endif + +static inline void drm_dp_tunnel_ref_get(struct drm_dp_tunnel *tunnel, + struct drm_dp_tunnel_ref *tunnel_ref) +{ + tunnel_ref->tunnel = drm_dp_tunnel_get(tunnel, &tunnel_ref->tracker); +} + +static inline void drm_dp_tunnel_ref_put(struct drm_dp_tunnel_ref *tunnel_ref) +{ + drm_dp_tunnel_put(tunnel_ref->tunnel, &tunnel_ref->tracker); +} + +struct drm_dp_tunnel * +drm_dp_tunnel_detect(struct drm_dp_tunnel_mgr *mgr, + struct drm_dp_aux *aux); +int drm_dp_tunnel_destroy(struct drm_dp_tunnel *tunnel); + +int drm_dp_tunnel_enable_bw_alloc(struct drm_dp_tunnel *tunnel); +int drm_dp_tunnel_disable_bw_alloc(struct drm_dp_tunnel *tunnel); +bool drm_dp_tunnel_bw_alloc_is_enabled(const struct drm_dp_tunnel *tunnel); +int drm_dp_tunnel_alloc_bw(struct drm_dp_tunnel *tunnel, int bw); +int drm_dp_tunnel_check_state(struct drm_dp_tunnel *tunnel); +int drm_dp_tunnel_update_state(struct drm_dp_tunnel *tunnel); + +void drm_dp_tunnel_set_io_error(struct drm_dp_tunnel *tunnel); + +int drm_dp_tunnel_handle_irq(struct drm_dp_tunnel_mgr *mgr, + struct drm_dp_aux *aux); + +int drm_dp_tunnel_max_dprx_rate(const struct drm_dp_tunnel *tunnel); +int drm_dp_tunnel_max_dprx_lane_count(const struct drm_dp_tunnel *tunnel); +int drm_dp_tunnel_available_bw(const struct drm_dp_tunnel *tunnel); + +const char *drm_dp_tunnel_name(const struct drm_dp_tunnel *tunnel); + +struct drm_dp_tunnel_state * +drm_dp_tunnel_atomic_get_state(struct drm_atomic_state *state, + struct drm_dp_tunnel *tunnel); +struct drm_dp_tunnel_state * +drm_dp_tunnel_atomic_get_new_state(struct drm_atomic_state *state, + const struct drm_dp_tunnel *tunnel); + +void drm_dp_tunnel_atomic_clear_state(struct drm_dp_tunnel_state *tunnel_state); + +int drm_dp_tunnel_atomic_set_stream_bw(struct drm_atomic_state *state, + struct drm_dp_tunnel *tunnel, + u8 stream_id, int bw); +int drm_dp_tunnel_atomic_get_group_streams_in_state(struct drm_atomic_state *state, + const struct drm_dp_tunnel *tunnel, + u32 *stream_mask); + +int drm_dp_tunnel_atomic_check_stream_bws(struct drm_atomic_state *state, + u32 *failed_stream_mask); + +int drm_dp_tunnel_atomic_get_tunnel_bw(const struct drm_dp_tunnel_state *tunnel_state); + +struct drm_dp_tunnel_mgr * +drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count); +void drm_dp_tunnel_mgr_destroy(struct drm_dp_tunnel_mgr *mgr); + +#else + +static inline struct drm_dp_tunnel * +drm_dp_tunnel_get_untracked(struct drm_dp_tunnel *tunnel) +{ + return NULL; +} + +static inline void +drm_dp_tunnel_put_untracked(struct drm_dp_tunnel *tunnel) {} + +static inline struct drm_dp_tunnel * +drm_dp_tunnel_get(struct drm_dp_tunnel *tunnel, struct ref_tracker **tracker) +{ + return NULL; +} + +static inline void +drm_dp_tunnel_put(struct drm_dp_tunnel *tunnel, struct ref_tracker **tracker) {} + +static inline void drm_dp_tunnel_ref_get(struct drm_dp_tunnel *tunnel, + struct drm_dp_tunnel_ref *tunnel_ref) {} +static inline void drm_dp_tunnel_ref_put(struct drm_dp_tunnel_ref *tunnel_ref) {} + +static inline struct drm_dp_tunnel * +drm_dp_tunnel_detect(struct drm_dp_tunnel_mgr *mgr, + struct drm_dp_aux *aux) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline int 
+drm_dp_tunnel_destroy(struct drm_dp_tunnel *tunnel) +{ + return 0; +} + +static inline int drm_dp_tunnel_enable_bw_alloc(struct drm_dp_tunnel *tunnel) +{ + return -EOPNOTSUPP; +} + +static inline int drm_dp_tunnel_disable_bw_alloc(struct drm_dp_tunnel *tunnel) +{ + return -EOPNOTSUPP; +} + +static inline bool drm_dp_tunnel_bw_alloc_is_enabled(const struct drm_dp_tunnel *tunnel) +{ + return false; +} + +static inline int +drm_dp_tunnel_alloc_bw(struct drm_dp_tunnel *tunnel, int bw) +{ + return -EOPNOTSUPP; +} + +static inline int +drm_dp_tunnel_check_state(struct drm_dp_tunnel *tunnel) +{ + return -EOPNOTSUPP; +} + +static inline int +drm_dp_tunnel_update_state(struct drm_dp_tunnel *tunnel) +{ + return -EOPNOTSUPP; +} + +static inline void drm_dp_tunnel_set_io_error(struct drm_dp_tunnel *tunnel) {} +static inline int +drm_dp_tunnel_handle_irq(struct drm_dp_tunnel_mgr *mgr, + struct drm_dp_aux *aux) +{ + return -EOPNOTSUPP; +} + +static inline int +drm_dp_tunnel_max_dprx_rate(const struct drm_dp_tunnel *tunnel) +{ + return 0; +} + +static inline int +drm_dp_tunnel_max_dprx_lane_count(const struct drm_dp_tunnel *tunnel) +{ + return 0; +} + +static inline int +drm_dp_tunnel_available_bw(const struct drm_dp_tunnel *tunnel) +{ + return -1; +} + +static inline const char * +drm_dp_tunnel_name(const struct drm_dp_tunnel *tunnel) +{ + return NULL; +} + +static inline struct drm_dp_tunnel_state * +drm_dp_tunnel_atomic_get_state(struct drm_atomic_state *state, + struct drm_dp_tunnel *tunnel) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline struct drm_dp_tunnel_state * +drm_dp_tunnel_atomic_get_new_state(struct drm_atomic_state *state, + const struct drm_dp_tunnel *tunnel) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline void +drm_dp_tunnel_atomic_clear_state(struct drm_dp_tunnel_state *tunnel_state) {} + +static inline int +drm_dp_tunnel_atomic_set_stream_bw(struct drm_atomic_state *state, + struct drm_dp_tunnel *tunnel, + u8 stream_id, int bw) +{ + return -EOPNOTSUPP; +} + +static inline int +drm_dp_tunnel_atomic_get_group_streams_in_state(struct drm_atomic_state *state, + const struct drm_dp_tunnel *tunnel, + u32 *stream_mask) +{ + return -EOPNOTSUPP; +} + +static inline int +drm_dp_tunnel_atomic_check_stream_bws(struct drm_atomic_state *state, + u32 *failed_stream_mask) +{ + return -EOPNOTSUPP; +} + +static inline int +drm_dp_tunnel_atomic_get_tunnel_bw(const struct drm_dp_tunnel_state *tunnel_state) +{ + return 0; +} + +static inline struct drm_dp_tunnel_mgr * +drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline +void drm_dp_tunnel_mgr_destroy(struct drm_dp_tunnel_mgr *mgr) {} + + +#endif /* CONFIG_DRM_DISPLAY_DP_TUNNEL */ + +#endif /* __DRM_DP_TUNNEL_H__ */ From patchwork Tue Jan 23 13:26:36 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: [27/44] drm/i915/dp: Add support to notify MST connectors to retry modesets From: Imre Deak X-Patchwork-Id: 575615 Message-Id: <20240123132653.413364-28-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:36 +0200 On shared (Thunderbolt) links with DP tunnels, the modeset may need to be retried on all connectors on the link due to a link BW limitation arising only after the atomic check phase. To support this add a helper function queuing a work to retry the modeset on a given port's connector and at the same time any MST connector with streams through the same port. 
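For illustration only (a hypothetical call site, not added by this patch), a handler that runs into a link BW limitation after the atomic check phase would use the helper roughly as follows:

	/* notify a specific connector to retry its modeset */
	intel_dp_queue_modeset_retry_for_link(state, encoder, crtc_state,
					      conn_state);

	/* or, with conn_state == NULL, notify every MST connector with a
	 * stream going through the encoder's port
	 */
	intel_dp_queue_modeset_retry_for_link(state, encoder, crtc_state, NULL);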
A follow-up change enabling the DP tunnel Bandwidth Allocation Mode will take this into use. Signed-off-by: Imre Deak --- drivers/gpu/drm/i915/display/intel_display.c | 5 +- drivers/gpu/drm/i915/display/intel_dp.c | 55 ++++++++++++++++++- drivers/gpu/drm/i915/display/intel_dp.h | 8 +++ .../drm/i915/display/intel_dp_link_training.c | 3 +- drivers/gpu/drm/i915/display/intel_dp_mst.c | 2 + 5 files changed, 67 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index a92e959c8ac7b..0caebbb3e2dbb 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -8060,8 +8060,9 @@ void intel_hpd_poll_fini(struct drm_i915_private *i915) /* Kill all the work that may have been queued by hpd. */ drm_connector_list_iter_begin(&i915->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { - if (connector->modeset_retry_work.func) - cancel_work_sync(&connector->modeset_retry_work); + if (connector->modeset_retry_work.func && + cancel_work_sync(&connector->modeset_retry_work)) + drm_connector_put(&connector->base); if (connector->hdcp.shim) { cancel_delayed_work_sync(&connector->hdcp.check_work); cancel_work_sync(&connector->hdcp.prop_work); diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index ab415f41924d7..4e36c2c39888e 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -2837,6 +2837,50 @@ intel_dp_audio_compute_config(struct intel_encoder *encoder, intel_dp_is_uhbr(pipe_config); } +void intel_dp_queue_modeset_retry_work(struct intel_connector *connector) +{ + struct drm_i915_private *i915 = to_i915(connector->base.dev); + + drm_connector_get(&connector->base); + if (!queue_work(i915->unordered_wq, &connector->modeset_retry_work)) + drm_connector_put(&connector->base); +} + +void +intel_dp_queue_modeset_retry_for_link(struct intel_atomic_state *state, + struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct intel_connector *connector; + struct intel_digital_connector_state *iter_conn_state; + struct intel_dp *intel_dp; + int i; + + if (conn_state) { + connector = to_intel_connector(conn_state->connector); + intel_dp_queue_modeset_retry_work(connector); + + return; + } + + if (drm_WARN_ON(&i915->drm, + !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))) + return; + + intel_dp = enc_to_intel_dp(encoder); + + for_each_new_intel_connector_in_state(state, connector, iter_conn_state, i) { + (void)iter_conn_state; + + if (connector->mst_port != intel_dp) + continue; + + intel_dp_queue_modeset_retry_work(connector); + } +} + int intel_dp_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, @@ -6436,6 +6480,14 @@ static void intel_dp_modeset_retry_work_fn(struct work_struct *work) mutex_unlock(&connector->dev->mode_config.mutex); /* Send Hotplug uevent so userspace can reprobe */ drm_kms_helper_connector_hotplug_event(connector); + + drm_connector_put(connector); +} + +void intel_dp_init_modeset_retry_work(struct intel_connector *connector) +{ + INIT_WORK(&connector->modeset_retry_work, + intel_dp_modeset_retry_work_fn); } bool @@ -6452,8 +6504,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port, int type; /* Initialize the work for modeset in case of link train failure */ 
- INIT_WORK(&intel_connector->modeset_retry_work, - intel_dp_modeset_retry_work_fn); + intel_dp_init_modeset_retry_work(intel_connector); if (drm_WARN(dev, dig_port->max_lanes < 1, "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n", diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h index 530cc97bc42f4..105c2086310db 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.h +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -23,6 +23,8 @@ struct intel_digital_port; struct intel_dp; struct intel_encoder; +struct work_struct; + struct link_config_limits { int min_rate, max_rate; int min_lane_count, max_lane_count; @@ -43,6 +45,12 @@ void intel_dp_adjust_compliance_config(struct intel_dp *intel_dp, bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); int intel_dp_min_bpp(enum intel_output_format output_format); +void intel_dp_init_modeset_retry_work(struct intel_connector *connector); +void intel_dp_queue_modeset_retry_work(struct intel_connector *connector); +void intel_dp_queue_modeset_retry_for_link(struct intel_atomic_state *state, + struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state); bool intel_dp_init_connector(struct intel_digital_port *dig_port, struct intel_connector *intel_connector); void intel_dp_set_link_params(struct intel_dp *intel_dp, diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index 1abfafbbfa757..7b140cbf8dd31 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -1075,7 +1075,6 @@ static void intel_dp_schedule_fallback_link_training(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { struct intel_connector *intel_connector = intel_dp->attached_connector; - struct drm_i915_private *i915 = dp_to_i915(intel_dp); if (!intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base)) { lt_dbg(intel_dp, DP_PHY_DPRX, "Link Training failed on disconnected sink.\n"); @@ -1093,7 +1092,7 @@ static void intel_dp_schedule_fallback_link_training(struct intel_dp *intel_dp, } /* Schedule a Hotplug Uevent to userspace to start modeset */ - queue_work(i915->unordered_wq, &intel_connector->modeset_retry_work); + intel_dp_queue_modeset_retry_work(intel_connector); } /* Perform the link training on all LTTPRs and the DPRX on a link. 
*/ diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 5fa25a5a36b55..b15e43ebf138b 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -1542,6 +1542,8 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo intel_connector->port = port; drm_dp_mst_get_port_malloc(port); + intel_dp_init_modeset_retry_work(intel_connector); + intel_connector->dp.dsc_decompression_aux = drm_dp_mst_dsc_aux_for_port(port); intel_dp_mst_read_decompression_port_dsc_caps(intel_dp, intel_connector); intel_connector->dp.dsc_hblank_expansion_quirk = From patchwork Tue Jan 23 13:26:37 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: [28/44] drm/i915/dp: Use drm_dp_max_dprx_data_rate() From: Imre Deak X-Patchwork-Id: 575614 Message-Id: <20240123132653.413364-29-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:37 +0200 Instead of intel_dp_max_data_rate() use the equivalent drm_dp_max_dprx_data_rate() which was copied from the former one in a previous patch. Signed-off-by: Imre Deak --- drivers/gpu/drm/i915/display/intel_display.c | 2 +- drivers/gpu/drm/i915/display/intel_dp.c | 62 +++----------------- drivers/gpu/drm/i915/display/intel_dp.h | 1 - drivers/gpu/drm/i915/display/intel_dp_mst.c | 2 +- 4 files changed, 10 insertions(+), 57 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 0caebbb3e2dbb..b9f985a5e705b 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -2478,7 +2478,7 @@ intel_link_compute_m_n(u16 bits_per_pixel_x16, int nlanes, u32 link_symbol_clock = intel_dp_link_symbol_clock(link_clock); u32 data_m = intel_dp_effective_data_rate(pixel_clock, bits_per_pixel_x16, bw_overhead); - u32 data_n = intel_dp_max_data_rate(link_clock, nlanes); + u32 data_n = drm_dp_max_dprx_data_rate(link_clock, nlanes); /* * Windows/BIOS uses fixed M/N values always. Follow suit. diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 4e36c2c39888e..c7b06a9b197cc 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -383,52 +383,6 @@ int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16, 1000000 * 16 * 8); } -/* - * Given a link rate and lanes, get the data bandwidth. - * - * Data bandwidth is the actual payload rate, which depends on the data - * bandwidth efficiency and the link rate. - * - * For 8b/10b channel encoding, SST and non-FEC, the data bandwidth efficiency - * is 80%. For example, for a 1.62 Gbps link, 1.62*10^9 bps * 0.80 * (1/8) = - * 162000 kBps. With 8-bit symbols, we have 162000 kHz symbol clock. Just by - * coincidence, the port clock in kHz matches the data bandwidth in kBps, and - * they equal the link bit rate in Gbps multiplied by 100000. (Note that this no - * longer holds for data bandwidth as soon as FEC or MST is taken into account!) - * - * For 128b/132b channel encoding, the data bandwidth efficiency is 96.71%. For - * example, for a 10 Gbps link, 10*10^9 bps * 0.9671 * (1/8) = 1208875 - * kBps. With 32-bit symbols, we have 312500 kHz symbol clock. The value 1000000 - * does not match the symbol clock, the port clock (not even if you think in - * terms of a byte clock), nor the data bandwidth. 
It only matches the link bit - * rate in units of 10000 bps. - */ -int -intel_dp_max_data_rate(int max_link_rate, int max_lanes) -{ - int ch_coding_efficiency = - drm_dp_bw_channel_coding_efficiency(drm_dp_is_uhbr_rate(max_link_rate)); - int max_link_rate_kbps = max_link_rate * 10; - - /* - * UHBR rates always use 128b/132b channel encoding, and have - * 97.71% data bandwidth efficiency. Consider max_link_rate the - * link bit rate in units of 10000 bps. - */ - /* - * Lower than UHBR rates always use 8b/10b channel encoding, and have - * 80% data bandwidth efficiency for SST non-FEC. However, this turns - * out to be a nop by coincidence: - * - * int max_link_rate_kbps = max_link_rate * 10; - * max_link_rate_kbps = DIV_ROUND_DOWN_ULL(max_link_rate_kbps * 8, 10); - * max_link_rate = max_link_rate_kbps / 8; - */ - return DIV_ROUND_DOWN_ULL(mul_u32_u32(max_link_rate_kbps * max_lanes, - ch_coding_efficiency), - 1000000 * 8); -} - bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); @@ -658,7 +612,7 @@ static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp, int mode_rate, max_rate; mode_rate = intel_dp_link_required(fixed_mode->clock, 18); - max_rate = intel_dp_max_data_rate(link_rate, lane_count); + max_rate = drm_dp_max_dprx_data_rate(link_rate, lane_count); if (mode_rate > max_rate) return false; @@ -1260,7 +1214,7 @@ intel_dp_mode_valid(struct drm_connector *_connector, max_link_clock = intel_dp_max_link_rate(intel_dp); max_lanes = intel_dp_max_lane_count(intel_dp); - max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); + max_rate = drm_dp_max_dprx_data_rate(max_link_clock, max_lanes); mode_rate = intel_dp_link_required(target_clock, intel_dp_mode_min_output_bpp(connector, mode)); @@ -1610,8 +1564,8 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, for (lane_count = limits->min_lane_count; lane_count <= limits->max_lane_count; lane_count <<= 1) { - link_avail = intel_dp_max_data_rate(link_rate, - lane_count); + link_avail = drm_dp_max_dprx_data_rate(link_rate, + lane_count); if (mode_rate <= link_avail) { pipe_config->lane_count = lane_count; @@ -2462,8 +2416,8 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, "DP link rate required %i available %i\n", intel_dp_link_required(adjusted_mode->crtc_clock, to_bpp_int_roundup(pipe_config->dsc.compressed_bpp_x16)), - intel_dp_max_data_rate(pipe_config->port_clock, - pipe_config->lane_count)); + drm_dp_max_dprx_data_rate(pipe_config->port_clock, + pipe_config->lane_count)); } else { drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n", pipe_config->lane_count, pipe_config->port_clock, @@ -2473,8 +2427,8 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, "DP link rate required %i available %i\n", intel_dp_link_required(adjusted_mode->crtc_clock, pipe_config->pipe_bpp), - intel_dp_max_data_rate(pipe_config->port_clock, - pipe_config->lane_count)); + drm_dp_max_dprx_data_rate(pipe_config->port_clock, + pipe_config->lane_count)); } return 0; } diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h index 105c2086310db..46f79747f807d 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.h +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -113,7 +113,6 @@ bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp); int intel_dp_link_required(int pixel_clock, int bpp); int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16, int bw_overhead); -int 
intel_dp_max_data_rate(int max_link_rate, int max_lanes); bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp); bool intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index b15e43ebf138b..cfcc157b7d41d 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -1295,7 +1295,7 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector, max_link_clock = intel_dp_max_link_rate(intel_dp); max_lanes = intel_dp_max_lane_count(intel_dp); - max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); + max_rate = drm_dp_max_dprx_data_rate(max_link_clock, max_lanes); mode_rate = intel_dp_link_required(mode->clock, min_bpp); ret = drm_modeset_lock(&mgr->base.lock, ctx); From patchwork Tue Jan 23 13:26:38 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: [29/44] drm/i915/dp: Factor out intel_dp_config_required_rate() From: Imre Deak X-Patchwork-Id: 575593 Message-Id: <20240123132653.413364-30-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:38 +0200 Factor out intel_dp_config_required_rate() used by a follow-up patch enabling the DP tunnel BW allocation mode. Signed-off-by: Imre Deak --- drivers/gpu/drm/i915/display/intel_dp.c | 43 +++++++++++-------------- drivers/gpu/drm/i915/display/intel_dp.h | 1 + 2 files changed, 20 insertions(+), 24 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index c7b06a9b197cc..0a5c60428ffb7 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -2338,6 +2338,17 @@ intel_dp_compute_config_limits(struct intel_dp *intel_dp, limits); } +int intel_dp_config_required_rate(const struct intel_crtc_state *crtc_state) +{ + const struct drm_display_mode *adjusted_mode = + &crtc_state->hw.adjusted_mode; + int bpp = crtc_state->dsc.compression_enable ? 
+ to_bpp_int_roundup(crtc_state->dsc.compressed_bpp_x16) : + crtc_state->pipe_bpp; + + return intel_dp_link_required(adjusted_mode->crtc_clock, bpp); +} + static int intel_dp_compute_link_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, @@ -2405,31 +2416,15 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, return ret; } - if (pipe_config->dsc.compression_enable) { - drm_dbg_kms(&i915->drm, - "DP lane count %d clock %d Input bpp %d Compressed bpp " BPP_X16_FMT "\n", - pipe_config->lane_count, pipe_config->port_clock, - pipe_config->pipe_bpp, - BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16)); + drm_dbg_kms(&i915->drm, + "DP lane count %d clock %d bpp input %d compressed " BPP_X16_FMT " link rate required %d available %d\n", + pipe_config->lane_count, pipe_config->port_clock, + pipe_config->pipe_bpp, + BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16), + intel_dp_config_required_rate(pipe_config), + drm_dp_max_dprx_data_rate(pipe_config->port_clock, + pipe_config->lane_count)); - drm_dbg_kms(&i915->drm, - "DP link rate required %i available %i\n", - intel_dp_link_required(adjusted_mode->crtc_clock, - to_bpp_int_roundup(pipe_config->dsc.compressed_bpp_x16)), - drm_dp_max_dprx_data_rate(pipe_config->port_clock, - pipe_config->lane_count)); - } else { - drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n", - pipe_config->lane_count, pipe_config->port_clock, - pipe_config->pipe_bpp); - - drm_dbg_kms(&i915->drm, - "DP link rate required %i available %i\n", - intel_dp_link_required(adjusted_mode->crtc_clock, - pipe_config->pipe_bpp), - drm_dp_max_dprx_data_rate(pipe_config->port_clock, - pipe_config->lane_count)); - } return 0; } diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h index 46f79747f807d..37274e3c2902f 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.h +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -102,6 +102,7 @@ void intel_dp_mst_suspend(struct drm_i915_private *dev_priv); void intel_dp_mst_resume(struct drm_i915_private *dev_priv); int intel_dp_max_link_rate(struct intel_dp *intel_dp); int intel_dp_max_lane_count(struct intel_dp *intel_dp); +int intel_dp_config_required_rate(const struct intel_crtc_state *crtc_state); int intel_dp_rate_select(struct intel_dp *intel_dp, int rate); void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, From patchwork Tue Jan 23 13:26:39 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: [30/44] drm/i915/dp: Export intel_dp_max_common_rate/lane_count() From: Imre Deak X-Patchwork-Id: 575620 Message-Id: <20240123132653.413364-31-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:39 +0200 Export intel_dp_max_common_rate() and intel_dp_max_lane_count() used by a follow-up patch enabling the DP tunnel BW allocation mode. 
Signed-off-by: Imre Deak --- drivers/gpu/drm/i915/display/intel_dp.c | 4 ++-- drivers/gpu/drm/i915/display/intel_dp.h | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 0a5c60428ffb7..f40706c5d1aad 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -309,7 +309,7 @@ static int intel_dp_common_rate(struct intel_dp *intel_dp, int index) } /* Theoretical max between source and sink */ -static int intel_dp_max_common_rate(struct intel_dp *intel_dp) +int intel_dp_max_common_rate(struct intel_dp *intel_dp) { return intel_dp_common_rate(intel_dp, intel_dp->num_common_rates - 1); } @@ -326,7 +326,7 @@ static int intel_dp_max_source_lane_count(struct intel_digital_port *dig_port) } /* Theoretical max between source and sink */ -static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp) +int intel_dp_max_common_lane_count(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); int source_max = intel_dp_max_source_lane_count(dig_port); diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h index 37274e3c2902f..a7906d8738c4a 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.h +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -104,6 +104,8 @@ int intel_dp_max_link_rate(struct intel_dp *intel_dp); int intel_dp_max_lane_count(struct intel_dp *intel_dp); int intel_dp_config_required_rate(const struct intel_crtc_state *crtc_state); int intel_dp_rate_select(struct intel_dp *intel_dp, int rate); +int intel_dp_max_common_rate(struct intel_dp *intel_dp); +int intel_dp_max_common_lane_count(struct intel_dp *intel_dp); void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, u8 *link_bw, u8 *rate_select); From patchwork Tue Jan 23 13:26:40 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: [31/44] drm/i915/dp: Factor out intel_dp_update_sink_caps() From: Imre Deak X-Patchwork-Id: 575616 Message-Id: <20240123132653.413364-32-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:40 +0200 Factor out a function updating the sink's link rate and lane count capabilities, used by a follow-up patch enabling the DP tunnel BW allocation mode. 
Signed-off-by: Imre Deak --- drivers/gpu/drm/i915/display/intel_dp.c | 11 ++++++++--- drivers/gpu/drm/i915/display/intel_dp.h | 1 + 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index f40706c5d1aad..23434d0aba188 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -3949,6 +3949,13 @@ intel_dp_has_sink_count(struct intel_dp *intel_dp) &intel_dp->desc); } +void intel_dp_update_sink_caps(struct intel_dp *intel_dp) +{ + intel_dp_set_sink_rates(intel_dp); + intel_dp_set_max_sink_lane_count(intel_dp); + intel_dp_set_common_rates(intel_dp); +} + static bool intel_dp_get_dpcd(struct intel_dp *intel_dp) { @@ -3965,9 +3972,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, drm_dp_is_branch(intel_dp->dpcd)); - intel_dp_set_sink_rates(intel_dp); - intel_dp_set_max_sink_lane_count(intel_dp); - intel_dp_set_common_rates(intel_dp); + intel_dp_update_sink_caps(intel_dp); } if (intel_dp_has_sink_count(intel_dp)) { diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h index a7906d8738c4a..49553e43add22 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.h +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -106,6 +106,7 @@ int intel_dp_config_required_rate(const struct intel_crtc_state *crtc_state); int intel_dp_rate_select(struct intel_dp *intel_dp, int rate); int intel_dp_max_common_rate(struct intel_dp *intel_dp); int intel_dp_max_common_lane_count(struct intel_dp *intel_dp); +void intel_dp_update_sink_caps(struct intel_dp *intel_dp); void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, u8 *link_bw, u8 *rate_select); From patchwork Tue Jan 23 13:26:41 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: [32/44] drm/i915/dp: Factor out intel_dp_read_dprx_caps() From: Imre Deak X-Patchwork-Id: 575595 Message-Id: <20240123132653.413364-33-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:41 +0200 Factor out a function to read the sink's DPRX capabilities used by a follow-up patch enabling the DP tunnel BW allocation mode. Signed-off-by: Imre Deak --- .../drm/i915/display/intel_dp_link_training.c | 30 +++++++++++++++---- .../drm/i915/display/intel_dp_link_training.h | 1 + 2 files changed, 26 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index 7b140cbf8dd31..fb84ca98bb7ab 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -162,6 +162,28 @@ static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEI return lttpr_count; } +int intel_dp_read_dprx_caps(struct intel_dp *intel_dp, u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + + if (intel_dp_is_edp(intel_dp)) + return 0; + + /* + * Detecting LTTPRs must be avoided on platforms with an AUX timeout + * period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1). 
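+ * Hence the LTTPR DPCD probe below is gated on DISPLAY_VER() >= 10,
+ * excluding Geminilake; the DPRX capabilities are read in either case.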
+ */ + if (DISPLAY_VER(i915) >= 10 && !IS_GEMINILAKE(i915)) + if (drm_dp_dpcd_probe(&intel_dp->aux, + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV)) + return -EIO; + + if (drm_dp_read_dpcd_caps(&intel_dp->aux, dpcd)) + return -EIO; + + return 0; +} + /** * intel_dp_init_lttpr_and_dprx_caps - detect LTTPR and DPRX caps, init the LTTPR link training mode * @intel_dp: Intel DP struct @@ -192,12 +214,10 @@ int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp) if (!intel_dp_is_edp(intel_dp) && (DISPLAY_VER(i915) >= 10 && !IS_GEMINILAKE(i915))) { u8 dpcd[DP_RECEIVER_CAP_SIZE]; + int err = intel_dp_read_dprx_caps(intel_dp, dpcd); - if (drm_dp_dpcd_probe(&intel_dp->aux, DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV)) - return -EIO; - - if (drm_dp_read_dpcd_caps(&intel_dp->aux, dpcd)) - return -EIO; + if (err != 0) + return err; lttpr_count = intel_dp_init_lttpr(intel_dp, dpcd); } diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h index 2c8f2775891b0..19836a8a4f904 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.h +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h @@ -11,6 +11,7 @@ struct intel_crtc_state; struct intel_dp; +int intel_dp_read_dprx_caps(struct intel_dp *intel_dp, u8 dpcd[DP_RECEIVER_CAP_SIZE]); int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp); void intel_dp_get_adjust_train(struct intel_dp *intel_dp, From patchwork Tue Jan 23 13:26:42 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: [33/44] drm/i915/dp: Add intel_dp_max_link_data_rate() From: Imre Deak X-Patchwork-Id: 575617 Message-Id: <20240123132653.413364-34-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:42 +0200 Add intel_dp_max_link_data_rate() to get the link BW vs. the sink DPRX BW used by a follow-up patch enabling the DP tunnel BW allocation mode. The link BW can be below the DPRX BW due to a BW limitation on a link shared by multiple sinks. Signed-off-by: Imre Deak --- drivers/gpu/drm/i915/display/intel_dp.c | 32 +++++++++++++++++---- drivers/gpu/drm/i915/display/intel_dp.h | 2 ++ drivers/gpu/drm/i915/display/intel_dp_mst.c | 3 +- 3 files changed, 30 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 23434d0aba188..9cd675c6d0ee8 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -383,6 +383,22 @@ int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16, 1000000 * 16 * 8); } +/** + * intel_dp_max_link_data_rate: Calculate the maximum rate for the given link params + * @intel_dp: Intel DP object + * @max_dprx_rate: Maximum data rate of the DPRX + * @max_dprx_lanes: Maximum lane count of the DPRX + * + * Calculate the maximum data rate for the provided link parameters. + * + * Returns the maximum data rate in kBps units. 
+ */ +int intel_dp_max_link_data_rate(struct intel_dp *intel_dp, + int max_dprx_rate, int max_dprx_lanes) +{ + return drm_dp_max_dprx_data_rate(max_dprx_rate, max_dprx_lanes); +} + bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); @@ -612,7 +628,7 @@ static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp, int mode_rate, max_rate; mode_rate = intel_dp_link_required(fixed_mode->clock, 18); - max_rate = drm_dp_max_dprx_data_rate(link_rate, lane_count); + max_rate = intel_dp_max_link_data_rate(intel_dp, link_rate, lane_count); if (mode_rate > max_rate) return false; @@ -1214,7 +1230,8 @@ intel_dp_mode_valid(struct drm_connector *_connector, max_link_clock = intel_dp_max_link_rate(intel_dp); max_lanes = intel_dp_max_lane_count(intel_dp); - max_rate = drm_dp_max_dprx_data_rate(max_link_clock, max_lanes); + max_rate = intel_dp_max_link_data_rate(intel_dp, max_link_clock, max_lanes); + mode_rate = intel_dp_link_required(target_clock, intel_dp_mode_min_output_bpp(connector, mode)); @@ -1564,8 +1581,10 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, for (lane_count = limits->min_lane_count; lane_count <= limits->max_lane_count; lane_count <<= 1) { - link_avail = drm_dp_max_dprx_data_rate(link_rate, - lane_count); + link_avail = intel_dp_max_link_data_rate(intel_dp, + link_rate, + lane_count); + if (mode_rate <= link_avail) { pipe_config->lane_count = lane_count; @@ -2422,8 +2441,9 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, pipe_config->pipe_bpp, BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16), intel_dp_config_required_rate(pipe_config), - drm_dp_max_dprx_data_rate(pipe_config->port_clock, - pipe_config->lane_count)); + intel_dp_max_link_data_rate(intel_dp, + pipe_config->port_clock, + pipe_config->lane_count)); return 0; } diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h index 49553e43add22..8b0dfbf06afff 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.h +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -117,6 +117,8 @@ bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp); int intel_dp_link_required(int pixel_clock, int bpp); int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16, int bw_overhead); +int intel_dp_max_link_data_rate(struct intel_dp *intel_dp, + int max_dprx_rate, int max_dprx_lanes); bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp); bool intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index cfcc157b7d41d..520393dc8b453 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -1295,7 +1295,8 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector, max_link_clock = intel_dp_max_link_rate(intel_dp); max_lanes = intel_dp_max_lane_count(intel_dp); - max_rate = drm_dp_max_dprx_data_rate(max_link_clock, max_lanes); + max_rate = intel_dp_max_link_data_rate(intel_dp, + max_link_clock, max_lanes); mode_rate = intel_dp_link_required(mode->clock, min_bpp); ret = drm_modeset_lock(&mgr->base.lock, ctx); From patchwork Tue Jan 23 13:26:43 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: [34/44] drm/i915/dp: Add way to get active pipes with syncing commits From: Imre Deak X-Patchwork-Id: 575619 Message-Id: 
<20240123132653.413364-35-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:43 +0200 Add a way to get the active pipes through a given DP port by syncing against a related pending non-blocking commit. Atm intel_dp_get_active_pipes() will only try to sync a given pipe and if that would block ignore the pipe. A follow-up change enabling the DP tunnel BW allocation mode will need to ensure that all active pipes are returned. A follow-up patchset will add a no-sync mode as well, needed by the current intel_tc_port_link_reset() user of it, which atm incorrectly ignores active pipes for which the syncing would block (but otherwise doesn't require an actual syncing). Signed-off-by: Imre Deak --- drivers/gpu/drm/i915/display/intel_dp.c | 25 +++++++++++++++++++++---- drivers/gpu/drm/i915/display/intel_dp.h | 6 ++++++ drivers/gpu/drm/i915/display/intel_tc.c | 4 +++- 3 files changed, 30 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 9cd675c6d0ee8..323475569ee7f 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -5019,6 +5019,7 @@ static bool intel_dp_has_connector(struct intel_dp *intel_dp, int intel_dp_get_active_pipes(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx, + enum intel_dp_get_pipes_mode mode, u8 *pipe_mask) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); @@ -5053,9 +5054,23 @@ int intel_dp_get_active_pipes(struct intel_dp *intel_dp, if (!crtc_state->hw.active) continue; - if (conn_state->commit && - !try_wait_for_completion(&conn_state->commit->hw_done)) - continue; + if (conn_state->commit) { + bool synced; + + switch (mode) { + case INTEL_DP_GET_PIPES_TRY_SYNC: + if (!try_wait_for_completion(&conn_state->commit->hw_done)) + continue; + break; + case INTEL_DP_GET_PIPES_SYNC: + synced = wait_for_completion_timeout(&conn_state->commit->hw_done, + msecs_to_jiffies(5000)); + drm_WARN_ON(&i915->drm, !synced); + break; + default: + MISSING_CASE(mode); + } + } *pipe_mask |= BIT(crtc->pipe); } @@ -5092,7 +5107,9 @@ int intel_dp_retrain_link(struct intel_encoder *encoder, if (!intel_dp_needs_link_retrain(intel_dp)) return 0; - ret = intel_dp_get_active_pipes(intel_dp, ctx, &pipe_mask); + ret = intel_dp_get_active_pipes(intel_dp, ctx, + INTEL_DP_GET_PIPES_TRY_SYNC, + &pipe_mask); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h index 8b0dfbf06afff..1a7b87787dfa9 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.h +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -25,6 +25,11 @@ struct intel_encoder; struct work_struct; +enum intel_dp_get_pipes_mode { + INTEL_DP_GET_PIPES_TRY_SYNC, + INTEL_DP_GET_PIPES_SYNC, +}; + struct link_config_limits { int min_rate, max_rate; int min_lane_count, max_lane_count; @@ -59,6 +64,7 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, int link_rate, u8 lane_count); int intel_dp_get_active_pipes(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx, + enum intel_dp_get_pipes_mode mode, u8 *pipe_mask); int intel_dp_retrain_link(struct intel_encoder *encoder, struct drm_modeset_acquire_ctx *ctx); diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index f34743e6eeed2..561d6f97ff189 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -1655,7 +1655,9 @@ static int 
reset_link_commit(struct intel_tc_port *tc, if (ret) return ret; - ret = intel_dp_get_active_pipes(intel_dp, ctx, &pipe_mask); + ret = intel_dp_get_active_pipes(intel_dp, ctx, + INTEL_DP_GET_PIPES_TRY_SYNC, + &pipe_mask); if (ret) return ret; From patchwork Tue Jan 23 13:26:44 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 8bit Subject: [35/44] drm/i915/dp: Add support for DP tunnel BW allocation From: Imre Deak X-Patchwork-Id: 575618 Message-Id: <20240123132653.413364-36-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:44 +0200 Add support to enable the DP tunnel BW allocation mode. Follow-up patches will call the required helpers added here to prepare for a modeset on a link with DP tunnels, the last change in the patchset actually enabling BWA. With BWA enabled, the driver will expose the full mode list a display supports, regardless of any BW limitation on a shared (Thunderbolt) link. Such BW limits will be checked against only during a modeset, when the driver has the full knowledge of each display's BW requirement. If the link BW changes in a way that a connector's modelist may also change, userspace will get a hotplug notification for all the connectors sharing the same link (so it can adjust the mode used for a display). The BW limitation can change at any point, asynchronously to modesets on a given connector, so a modeset can fail even though the atomic check for it passed. In such scenarios userspace will get a bad link notification and in response is supposed to retry the modeset. Signed-off-by: Imre Deak --- drivers/gpu/drm/i915/Kconfig | 13 + drivers/gpu/drm/i915/Kconfig.debug | 1 + drivers/gpu/drm/i915/Makefile | 3 + drivers/gpu/drm/i915/display/intel_atomic.c | 2 + .../gpu/drm/i915/display/intel_display_core.h | 1 + .../drm/i915/display/intel_display_types.h | 9 + .../gpu/drm/i915/display/intel_dp_tunnel.c | 642 ++++++++++++++++++ .../gpu/drm/i915/display/intel_dp_tunnel.h | 131 ++++ 8 files changed, 802 insertions(+) create mode 100644 drivers/gpu/drm/i915/display/intel_dp_tunnel.c create mode 100644 drivers/gpu/drm/i915/display/intel_dp_tunnel.h diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index b5d6e3352071f..4636913c17868 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -155,6 +155,19 @@ config DRM_I915_PXP protected session and manage the status of the alive software session, as well as its life cycle. +config DRM_I915_DP_TUNNEL + bool "Enable DP tunnel support" + depends on DRM_I915 + select DRM_DISPLAY_DP_TUNNEL + default y + help + Choose this option to detect DP tunnels and enable the Bandwidth + Allocation mode for such tunnels. This allows using the maximum + resolution allowed by the link BW on all displays sharing the + link BW, for instance on a Thunderbolt link. + + If in doubt, say "Y". 
+ menu "drm/i915 Debugging" depends on DRM_I915 depends on EXPERT diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug index 5b7162076850c..bc18e2d9ea05d 100644 --- a/drivers/gpu/drm/i915/Kconfig.debug +++ b/drivers/gpu/drm/i915/Kconfig.debug @@ -28,6 +28,7 @@ config DRM_I915_DEBUG select STACKDEPOT select STACKTRACE select DRM_DP_AUX_CHARDEV + select DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE if DRM_I915_DP_TUNNEL select X86_MSR # used by igt/pm_rpm select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks) select DRM_DEBUG_MM if DRM=y diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index c13f14edb5088..3ef6ed41e62b4 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -369,6 +369,9 @@ i915-y += \ display/vlv_dsi.o \ display/vlv_dsi_pll.o +i915-$(CONFIG_DRM_I915_DP_TUNNEL) += \ + display/intel_dp_tunnel.o + i915-y += \ i915_perf.o diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c index ec0d5168b5035..96ab37e158995 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.c +++ b/drivers/gpu/drm/i915/display/intel_atomic.c @@ -29,6 +29,7 @@ * See intel_atomic_plane.c for the plane-specific atomic functionality. */ +#include #include #include #include @@ -38,6 +39,7 @@ #include "intel_atomic.h" #include "intel_cdclk.h" #include "intel_display_types.h" +#include "intel_dp_tunnel.h" #include "intel_global_state.h" #include "intel_hdcp.h" #include "intel_psr.h" diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h index a90f1aa201be8..0993d25a0a686 100644 --- a/drivers/gpu/drm/i915/display/intel_display_core.h +++ b/drivers/gpu/drm/i915/display/intel_display_core.h @@ -522,6 +522,7 @@ struct intel_display { } wq; /* Grouping using named structs. Keep sorted. */ + struct drm_dp_tunnel_mgr *dp_tunnel_mgr; struct intel_audio audio; struct intel_dpll dpll; struct intel_fbc *fbc[I915_MAX_FBCS]; diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index ae2e8cff9d691..b79db78b27728 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -33,6 +33,7 @@ #include #include +#include #include #include #include @@ -677,6 +678,8 @@ struct intel_atomic_state { struct intel_shared_dpll_state shared_dpll[I915_NUM_PLLS]; + struct intel_dp_tunnel_inherited_state *dp_tunnel_state; + /* * Current watermarks can't be trusted during hardware readout, so * don't bother calculating intermediate watermarks. @@ -1372,6 +1375,9 @@ struct intel_crtc_state { struct drm_dsc_config config; } dsc; + /* DP tunnel used for BW allocation. 
*/ + struct drm_dp_tunnel_ref dp_tunnel_ref; + /* HSW+ linetime watermarks */ u16 linetime; u16 ips_linetime; @@ -1775,6 +1781,9 @@ struct intel_dp { /* connector directly attached - won't be use for modeset in mst world */ struct intel_connector *attached_connector; + struct drm_dp_tunnel *tunnel; + bool tunnel_suspended:1; + /* mst connector list */ struct intel_dp_mst_encoder *mst_encoders[I915_MAX_PIPES]; struct drm_dp_mst_topology_mgr mst_mgr; diff --git a/drivers/gpu/drm/i915/display/intel_dp_tunnel.c b/drivers/gpu/drm/i915/display/intel_dp_tunnel.c new file mode 100644 index 0000000000000..52dd0108a6c13 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_dp_tunnel.c @@ -0,0 +1,642 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2023 Intel Corporation + */ + +#include "i915_drv.h" + +#include + +#include "intel_atomic.h" +#include "intel_display_limits.h" +#include "intel_display_types.h" +#include "intel_dp.h" +#include "intel_dp_link_training.h" +#include "intel_dp_mst.h" +#include "intel_dp_tunnel.h" +#include "intel_link_bw.h" + +struct intel_dp_tunnel_inherited_state { + struct { + struct drm_dp_tunnel_ref tunnel_ref; + } tunnels[I915_MAX_PIPES]; +}; + +static void destroy_tunnel(struct intel_dp *intel_dp) +{ + drm_dp_tunnel_destroy(intel_dp->tunnel); + intel_dp->tunnel = NULL; +} + +void intel_dp_tunnel_disconnect(struct intel_dp *intel_dp) +{ + if (!intel_dp->tunnel) + return; + + destroy_tunnel(intel_dp); +} + +void intel_dp_tunnel_destroy(struct intel_dp *intel_dp) +{ + if (!intel_dp->tunnel) + return; + + if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp)) + drm_dp_tunnel_disable_bw_alloc(intel_dp->tunnel); + + destroy_tunnel(intel_dp); +} + +static int kbytes_to_mbits(int kbytes) +{ + return DIV_ROUND_UP(kbytes * 8, 1000); +} + +static int get_current_link_bw(struct intel_dp *intel_dp, + bool *below_dprx_bw) +{ + int rate = intel_dp_max_common_rate(intel_dp); + int lane_count = intel_dp_max_common_lane_count(intel_dp); + int bw; + + bw = intel_dp_max_link_data_rate(intel_dp, rate, lane_count); + *below_dprx_bw = bw < drm_dp_max_dprx_data_rate(rate, lane_count); + + return bw; +} + +static int update_tunnel_state(struct intel_dp *intel_dp) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + bool old_bw_below_dprx; + bool new_bw_below_dprx; + int old_bw; + int new_bw; + int ret; + + old_bw = get_current_link_bw(intel_dp, &old_bw_below_dprx); + + ret = drm_dp_tunnel_update_state(intel_dp->tunnel); + if (ret < 0) { + drm_dbg_kms(&i915->drm, + "[DPTUN %s][ENCODER:%d:%s] State update failed (err %pe)\n", + drm_dp_tunnel_name(intel_dp->tunnel), + encoder->base.base.id, + encoder->base.name, + ERR_PTR(ret)); + + return ret; + } + + if (ret == 0 || + !drm_dp_tunnel_bw_alloc_is_enabled(intel_dp->tunnel)) + return 0; + + intel_dp_update_sink_caps(intel_dp); + + new_bw = get_current_link_bw(intel_dp, &new_bw_below_dprx); + + /* Suppress the notification if the mode list can't change due to bw. 
*/ + if (old_bw_below_dprx == new_bw_below_dprx && + !new_bw_below_dprx) + return 0; + + drm_dbg_kms(&i915->drm, + "[DPTUN %s][ENCODER:%d:%s] Notify users about BW change: %d -> %d\n", + drm_dp_tunnel_name(intel_dp->tunnel), + encoder->base.base.id, + encoder->base.name, + kbytes_to_mbits(old_bw), + kbytes_to_mbits(new_bw)); + + return 1; +} + +static int allocate_initial_tunnel_bw(struct intel_dp *intel_dp, + struct drm_modeset_acquire_ctx *ctx) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + const struct intel_crtc *crtc; + int tunnel_bw = 0; + u8 pipe_mask; + int err; + + err = intel_dp_get_active_pipes(intel_dp, ctx, + INTEL_DP_GET_PIPES_SYNC, + &pipe_mask); + if (err) + return err; + + for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) { + const struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + int stream_bw = intel_dp_config_required_rate(crtc_state); + + drm_dbg_kms(&i915->drm, + "[DPTUN %s][ENCODER:%d:%s][CRTC:%d:%s] Initial BW for stream %d: %d/%d Mb/s\n", + drm_dp_tunnel_name(intel_dp->tunnel), + encoder->base.base.id, + encoder->base.name, + crtc->base.base.id, + crtc->base.name, + crtc->pipe, + kbytes_to_mbits(stream_bw), + kbytes_to_mbits(tunnel_bw)); + + tunnel_bw += stream_bw; + } + + err = drm_dp_tunnel_alloc_bw(intel_dp->tunnel, tunnel_bw); + if (err) { + drm_dbg_kms(&i915->drm, + "[DPTUN %s][ENCODER:%d:%s] Initial BW allocation failed (err %pe)\n", + drm_dp_tunnel_name(intel_dp->tunnel), + encoder->base.base.id, + encoder->base.name, + ERR_PTR(err)); + + return err; + } + + return update_tunnel_state(intel_dp); +} + +static int detect_new_tunnel(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + struct drm_dp_tunnel *tunnel; + int ret; + + tunnel = drm_dp_tunnel_detect(i915->display.dp_tunnel_mgr, + &intel_dp->aux); + if (IS_ERR(tunnel)) + return PTR_ERR(tunnel); + + intel_dp->tunnel = tunnel; + + ret = drm_dp_tunnel_enable_bw_alloc(intel_dp->tunnel); + if (ret) { + if (ret == -EOPNOTSUPP) + return 0; + + drm_dbg_kms(&i915->drm, + "[DPTUN %s][ENCODER:%d:%s] Failed to enable BW allocation mode (ret %pe)\n", + drm_dp_tunnel_name(intel_dp->tunnel), + encoder->base.base.id, + encoder->base.name, + ERR_PTR(ret)); + + /* Keep the tunnel with BWA disabled */ + return 0; + } + + ret = allocate_initial_tunnel_bw(intel_dp, ctx); + if (ret < 0) + intel_dp_tunnel_destroy(intel_dp); + + return ret; +} + +int intel_dp_tunnel_detect(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx) +{ + int ret; + + if (intel_dp_is_edp(intel_dp)) + return 0; + + if (intel_dp->tunnel) { + ret = update_tunnel_state(intel_dp); + if (ret >= 0) + return ret; + + /* Try to recreate the tunnel after an update error. 
*/ + intel_dp_tunnel_destroy(intel_dp); + } + + ret = detect_new_tunnel(intel_dp, ctx); + if (ret >= 0 || ret == -EDEADLK) + return ret; + + return ret; +} + +bool intel_dp_tunnel_bw_alloc_is_enabled(struct intel_dp *intel_dp) +{ + return intel_dp->tunnel && + drm_dp_tunnel_bw_alloc_is_enabled(intel_dp->tunnel); +} + +void intel_dp_tunnel_suspend(struct intel_dp *intel_dp) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_connector *connector = intel_dp->attached_connector; + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + + if (!intel_dp_tunnel_bw_alloc_is_enabled(intel_dp)) + return; + + drm_dbg_kms(&i915->drm, "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Suspend\n", + drm_dp_tunnel_name(intel_dp->tunnel), + connector->base.base.id, connector->base.name, + encoder->base.base.id, encoder->base.name); + + intel_dp->tunnel_suspended = true; +} + +void intel_dp_tunnel_resume(struct intel_dp *intel_dp, bool dpcd_updated) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_connector *connector = intel_dp->attached_connector; + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + u8 dpcd[DP_RECEIVER_CAP_SIZE]; + int err = 0; + + if (!intel_dp->tunnel_suspended) + return; + + intel_dp->tunnel_suspended = false; + + drm_dbg_kms(&i915->drm, "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Resume\n", + drm_dp_tunnel_name(intel_dp->tunnel), + connector->base.base.id, connector->base.name, + encoder->base.base.id, encoder->base.name); + + /* DPRX caps read required by tunnel detection */ + if (!dpcd_updated) + err = intel_dp_read_dprx_caps(intel_dp, dpcd); + + if (err) + drm_dp_tunnel_set_io_error(intel_dp->tunnel); + else + err = drm_dp_tunnel_enable_bw_alloc(intel_dp->tunnel); + /* TODO: allocate initial BW */ + + if (!err) + return; + + drm_dbg_kms(&i915->drm, + "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Tunnel can't be resumed, will drop and redect it (err %pe)\n", + drm_dp_tunnel_name(intel_dp->tunnel), + connector->base.base.id, connector->base.name, + encoder->base.base.id, encoder->base.name, + ERR_PTR(err)); +} + +static struct drm_dp_tunnel * +get_inherited_tunnel_state(struct intel_atomic_state *state, + const struct intel_crtc *crtc) +{ + if (!state->dp_tunnel_state) + return NULL; + + return state->dp_tunnel_state->tunnels[crtc->pipe].tunnel_ref.tunnel; +} + +static int +add_inherited_tunnel_state(struct intel_atomic_state *state, + struct drm_dp_tunnel *tunnel, + const struct intel_crtc *crtc) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + struct drm_dp_tunnel *old_tunnel; + + old_tunnel = get_inherited_tunnel_state(state, crtc); + if (old_tunnel) { + drm_WARN_ON(&i915->drm, old_tunnel != tunnel); + return 0; + } + + if (!state->dp_tunnel_state) { + state->dp_tunnel_state = kzalloc(sizeof(*state->dp_tunnel_state), GFP_KERNEL); + if (!state->dp_tunnel_state) + return -ENOMEM; + } + + drm_dp_tunnel_ref_get(tunnel, + &state->dp_tunnel_state->tunnels[crtc->pipe].tunnel_ref); + + return 0; +} + +static int check_inherited_tunnel_state(struct intel_atomic_state *state, + struct intel_dp *intel_dp, + const struct intel_digital_connector_state *old_conn_state) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + const struct intel_connector *connector = + to_intel_connector(old_conn_state->base.connector); + struct intel_crtc *old_crtc; + const struct intel_crtc_state *old_crtc_state; + + /* + * If a BWA tunnel gets detected only 
after the corresponding + * connector got enabled already without a BWA tunnel, or a different + * BWA tunnel (which was removed meanwhile) the old CRTC state won't + * contain the state of the current tunnel. This tunnel still has a + * reserved BW, which needs to be released, add the state for such + * inherited tunnels separately only to this atomic state. + */ + if (!intel_dp_tunnel_bw_alloc_is_enabled(intel_dp)) + return 0; + + if (!old_conn_state->base.crtc) + return 0; + + old_crtc = to_intel_crtc(old_conn_state->base.crtc); + old_crtc_state = intel_atomic_get_old_crtc_state(state, old_crtc); + + if (!old_crtc_state->hw.active || + old_crtc_state->dp_tunnel_ref.tunnel == intel_dp->tunnel) + return 0; + + drm_dbg_kms(&i915->drm, + "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Adding state for inherited tunnel %p\n", + drm_dp_tunnel_name(intel_dp->tunnel), + connector->base.base.id, + connector->base.name, + encoder->base.base.id, + encoder->base.name, + old_crtc->base.base.id, + old_crtc->base.name, + intel_dp->tunnel); + + return add_inherited_tunnel_state(state, intel_dp->tunnel, old_crtc); +} + +void intel_dp_tunnel_atomic_cleanup_inherited_state(struct intel_atomic_state *state) +{ + enum pipe pipe; + + if (!state->dp_tunnel_state) + return; + + for_each_pipe(to_i915(state->base.dev), pipe) + if (state->dp_tunnel_state->tunnels[pipe].tunnel_ref.tunnel) + drm_dp_tunnel_ref_put(&state->dp_tunnel_state->tunnels[pipe].tunnel_ref); + + kfree(state->dp_tunnel_state); + state->dp_tunnel_state = NULL; +} + +static int intel_dp_tunnel_atomic_add_group_state(struct intel_atomic_state *state, + struct drm_dp_tunnel *tunnel) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + u32 pipe_mask; + int err; + + if (!tunnel) + return 0; + + err = drm_dp_tunnel_atomic_get_group_streams_in_state(&state->base, + tunnel, &pipe_mask); + if (err) + return err; + + drm_WARN_ON(&i915->drm, pipe_mask & ~((1 << I915_MAX_PIPES) - 1)); + + return intel_modeset_pipes_in_mask_early(state, "DPTUN", pipe_mask); +} + +int intel_dp_tunnel_atomic_add_state_for_crtc(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + const struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct drm_dp_tunnel_state *tunnel_state; + struct drm_dp_tunnel *tunnel = new_crtc_state->dp_tunnel_ref.tunnel; + + if (!tunnel) + return 0; + + tunnel_state = drm_dp_tunnel_atomic_get_state(&state->base, tunnel); + if (IS_ERR(tunnel_state)) + return PTR_ERR(tunnel_state); + + return 0; +} + +static int check_group_state(struct intel_atomic_state *state, + struct intel_dp *intel_dp, + const struct intel_connector *connector, + struct intel_crtc *crtc) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + const struct intel_crtc_state *crtc_state; + + crtc_state = intel_atomic_get_new_crtc_state(state, crtc); + + if (!crtc_state->dp_tunnel_ref.tunnel) + return 0; + + drm_dbg_kms(&i915->drm, + "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Adding group state for tunnel %p\n", + drm_dp_tunnel_name(intel_dp->tunnel), + connector->base.base.id, + connector->base.name, + encoder->base.base.id, + encoder->base.name, + crtc->base.base.id, + crtc->base.name, + intel_dp->tunnel); + + return intel_dp_tunnel_atomic_add_group_state(state, crtc_state->dp_tunnel_ref.tunnel); +} + +int intel_dp_tunnel_atomic_check_state(struct intel_atomic_state *state, + struct intel_dp *intel_dp, + struct 
intel_connector *connector) +{ + const struct intel_digital_connector_state *old_conn_state = + intel_atomic_get_old_connector_state(state, connector); + const struct intel_digital_connector_state *new_conn_state = + intel_atomic_get_new_connector_state(state, connector); + int err; + + if (old_conn_state->base.crtc) { + err = check_group_state(state, intel_dp, connector, + to_intel_crtc(old_conn_state->base.crtc)); + if (err) + return err; + } + + if (new_conn_state->base.crtc && + new_conn_state->base.crtc != old_conn_state->base.crtc) { + err = check_group_state(state, intel_dp, connector, + to_intel_crtc(new_conn_state->base.crtc)); + if (err) + return err; + } + + return check_inherited_tunnel_state(state, intel_dp, old_conn_state); +} + +void intel_dp_tunnel_atomic_compute_stream_bw(struct intel_atomic_state *state, + struct intel_dp *intel_dp, + const struct intel_connector *connector, + struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + int required_rate = intel_dp_config_required_rate(crtc_state); + + if (!intel_dp_tunnel_bw_alloc_is_enabled(intel_dp)) + return; + + drm_dbg_kms(&i915->drm, + "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Stream %d required BW %d Mb/s\n", + drm_dp_tunnel_name(intel_dp->tunnel), + connector->base.base.id, + connector->base.name, + encoder->base.base.id, + encoder->base.name, + crtc->base.base.id, + crtc->base.name, + crtc->pipe, + kbytes_to_mbits(required_rate)); + + drm_dp_tunnel_atomic_set_stream_bw(&state->base, intel_dp->tunnel, + crtc->pipe, required_rate); + + drm_dp_tunnel_ref_get(intel_dp->tunnel, + &crtc_state->dp_tunnel_ref); +} + +/** + * intel_dp_tunnel_atomic_check_link - Check the DP tunnel atomic state + * @state: intel atomic state + * @limits: link BW limits + * + * Check the link configuration for all DP tunnels in @state. If the + * configuration is invalid, @limits will be updated if possible to + * reduce the total BW, after which the configuration for all CRTCs in + * @state must be recomputed with the updated @limits. + * + * Returns: + * - 0 if the configuration is valid + * - %-EAGAIN, if the configuration is invalid and @limits got updated + * with fallback values with which the configuration of all CRTCs in + * @state must be recomputed + * - Other negative error, if the configuration is invalid without a + * fallback possibility, or the check failed for another reason + */ +int intel_dp_tunnel_atomic_check_link(struct intel_atomic_state *state, + struct intel_link_bw_limits *limits) +{ + u32 failed_stream_mask; + int err; + + err = drm_dp_tunnel_atomic_check_stream_bws(&state->base, + &failed_stream_mask); + if (err != -ENOSPC) + return err; + + err = intel_link_bw_reduce_bpp(state, limits, + failed_stream_mask, "DP tunnel link BW"); + + return err ?
: -EAGAIN; +} + +void intel_dp_tunnel_atomic_alloc_bw(struct intel_atomic_state *state, + struct intel_encoder *encoder, + const struct intel_crtc_state *new_crtc_state, + const struct drm_connector_state *new_conn_state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + struct drm_dp_tunnel *tunnel = new_crtc_state->dp_tunnel_ref.tunnel; + const struct drm_dp_tunnel_state *new_tunnel_state; + int err; + + if (!tunnel) + return; + + new_tunnel_state = drm_dp_tunnel_atomic_get_new_state(&state->base, tunnel); + + err = drm_dp_tunnel_alloc_bw(tunnel, + drm_dp_tunnel_atomic_get_tunnel_bw(new_tunnel_state)); + if (!err) + return; + + if (!intel_digital_port_connected(encoder)) + return; + + drm_dbg_kms(&i915->drm, + "[DPTUN %s][ENCODER:%d:%s] BW allocation failed on a connected sink (err %pe)\n", + drm_dp_tunnel_name(tunnel), + encoder->base.base.id, + encoder->base.name, + ERR_PTR(err)); + + intel_dp_queue_modeset_retry_for_link(state, encoder, new_crtc_state, new_conn_state); +} + +void intel_dp_tunnel_atomic_free_bw(struct intel_atomic_state *state, + struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_crtc *old_crtc = to_intel_crtc(old_crtc_state->uapi.crtc); + struct drm_dp_tunnel *tunnel; + int err; + + tunnel = get_inherited_tunnel_state(state, old_crtc); + if (!tunnel) + tunnel = old_crtc_state->dp_tunnel_ref.tunnel; + + if (!tunnel) + return; + + err = drm_dp_tunnel_alloc_bw(tunnel, 0); + if (!err) + return; + + if (!intel_digital_port_connected(encoder)) + return; + + drm_dbg_kms(&i915->drm, + "[DPTUN %s][ENCODER:%d:%s] BW freeing failed on a connected sink (err %pe)\n", + drm_dp_tunnel_name(tunnel), + encoder->base.base.id, + encoder->base.name, + ERR_PTR(err)); + + intel_dp_queue_modeset_retry_for_link(state, encoder, old_crtc_state, old_conn_state); +} + +int intel_dp_tunnel_mgr_init(struct drm_i915_private *i915) +{ + struct drm_dp_tunnel_mgr *tunnel_mgr; + struct drm_connector_list_iter connector_list_iter; + struct intel_connector *connector; + int dp_connectors = 0; + + drm_connector_list_iter_begin(&i915->drm, &connector_list_iter); + for_each_intel_connector_iter(connector, &connector_list_iter) { + if (connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) + continue; + + dp_connectors++; + } + drm_connector_list_iter_end(&connector_list_iter); + + tunnel_mgr = drm_dp_tunnel_mgr_create(&i915->drm, dp_connectors); + if (IS_ERR(tunnel_mgr)) + return PTR_ERR(tunnel_mgr); + + i915->display.dp_tunnel_mgr = tunnel_mgr; + + return 0; +} + +void intel_dp_tunnel_mgr_cleanup(struct drm_i915_private *i915) +{ + drm_dp_tunnel_mgr_destroy(i915->display.dp_tunnel_mgr); + i915->display.dp_tunnel_mgr = NULL; +} diff --git a/drivers/gpu/drm/i915/display/intel_dp_tunnel.h b/drivers/gpu/drm/i915/display/intel_dp_tunnel.h new file mode 100644 index 0000000000000..bedba3ba9ad8d --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_dp_tunnel.h @@ -0,0 +1,131 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef __INTEL_DP_TUNNEL_H__ +#define __INTEL_DP_TUNNEL_H__ + +#include +#include + +struct drm_i915_private; +struct drm_connector_state; +struct drm_modeset_acquire_ctx; + +struct intel_atomic_state; +struct intel_connector; +struct intel_crtc; +struct intel_crtc_state; +struct intel_dp; +struct intel_encoder; +struct intel_link_bw_limits; + +#if 
defined(CONFIG_DRM_I915_DP_TUNNEL) && defined(I915) + +int intel_dp_tunnel_detect(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx); +void intel_dp_tunnel_disconnect(struct intel_dp *intel_dp); +void intel_dp_tunnel_destroy(struct intel_dp *intel_dp); +void intel_dp_tunnel_resume(struct intel_dp *intel_dp, bool dpcd_updated); +void intel_dp_tunnel_suspend(struct intel_dp *intel_dp); + +bool intel_dp_tunnel_bw_alloc_is_enabled(struct intel_dp *intel_dp); + +void +intel_dp_tunnel_atomic_cleanup_inherited_state(struct intel_atomic_state *state); + +void intel_dp_tunnel_atomic_compute_stream_bw(struct intel_atomic_state *state, + struct intel_dp *intel_dp, + const struct intel_connector *connector, + struct intel_crtc_state *crtc_state); + +int intel_dp_tunnel_atomic_add_state_for_crtc(struct intel_atomic_state *state, + struct intel_crtc *crtc); +int intel_dp_tunnel_atomic_check_link(struct intel_atomic_state *state, + struct intel_link_bw_limits *limits); +int intel_dp_tunnel_atomic_check_state(struct intel_atomic_state *state, + struct intel_dp *intel_dp, + struct intel_connector *connector); +void intel_dp_tunnel_atomic_alloc_bw(struct intel_atomic_state *state, + struct intel_encoder *encoder, + const struct intel_crtc_state *new_crtc_state, + const struct drm_connector_state *new_conn_state); +void intel_dp_tunnel_atomic_free_bw(struct intel_atomic_state *state, + struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state); + +int intel_dp_tunnel_mgr_init(struct drm_i915_private *i915); +void intel_dp_tunnel_mgr_cleanup(struct drm_i915_private *i915); + +#else + +static inline int +intel_dp_tunnel_detect(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx) +{ + return -EOPNOTSUPP; +} + +static inline void intel_dp_tunnel_disconnect(struct intel_dp *intel_dp) {} +static inline void intel_dp_tunnel_destroy(struct intel_dp *intel_dp) {} +static inline void intel_dp_tunnel_resume(struct intel_dp *intel_dp, bool dpcd_updated) {} +static inline void intel_dp_tunnel_suspend(struct intel_dp *intel_dp) {} + +static inline bool intel_dp_tunnel_bw_alloc_is_enabled(struct intel_dp *intel_dp) +{ + return false; +} + +static inline void +intel_dp_tunnel_atomic_cleanup_inherited_state(struct intel_atomic_state *state) {} + +static inline void +intel_dp_tunnel_atomic_compute_stream_bw(struct intel_atomic_state *state, + struct intel_dp *intel_dp, + const struct intel_connector *connector, + struct intel_crtc_state *crtc_state) {} + +static inline int +intel_dp_tunnel_atomic_add_state_for_crtc(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + return 0; +} + +static inline int +intel_dp_tunnel_atomic_check_link(struct intel_atomic_state *state, + struct intel_link_bw_limits *limits) +{ + return 0; +} + +static inline int +intel_dp_tunnel_atomic_check_state(struct intel_atomic_state *state, + struct intel_dp *intel_dp, + struct intel_connector *connector) +{ + return 0; +} + +static inline void +intel_dp_tunnel_atomic_alloc_bw(struct intel_atomic_state *state, + struct intel_encoder *encoder, + const struct intel_crtc_state *new_crtc_state, + const struct drm_connector_state *new_conn_state) {} +static inline void +intel_dp_tunnel_atomic_free_bw(struct intel_atomic_state *state, + struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) {} + +static inline int +intel_dp_tunnel_mgr_init(struct drm_i915_private *i915) +{ + 
return 0; +} + +static inline void intel_dp_tunnel_mgr_cleanup(struct drm_i915_private *i915) {} + +#endif /* CONFIG_DRM_I915_DP_TUNNEL */ + +#endif /* __INTEL_DP_TUNNEL_H__ */ From patchwork Tue Jan 23 13:26:45 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: [36/44] drm/i915/dp: Add DP tunnel atomic state and check BW limit From: Imre Deak X-Patchwork-Id: 575598 Message-Id: <20240123132653.413364-37-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:45 +0200 Add the atomic state during a modeset required to enable the DP tunnel BW allocation mode on links where such a tunnel was detected. Signed-off-by: Imre Deak --- drivers/gpu/drm/i915/display/intel_atomic.c | 8 ++++++++ drivers/gpu/drm/i915/display/intel_display.c | 19 +++++++++++++++++++ drivers/gpu/drm/i915/display/intel_link_bw.c | 5 +++++ 3 files changed, 32 insertions(+) diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c index 96ab37e158995..4236740ede9ed 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.c +++ b/drivers/gpu/drm/i915/display/intel_atomic.c @@ -260,6 +260,10 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc) if (crtc_state->post_csc_lut) drm_property_blob_get(crtc_state->post_csc_lut); + if (crtc_state->dp_tunnel_ref.tunnel) + drm_dp_tunnel_ref_get(old_crtc_state->dp_tunnel_ref.tunnel, + &crtc_state->dp_tunnel_ref); + crtc_state->update_pipe = false; crtc_state->update_m_n = false; crtc_state->update_lrr = false; @@ -311,6 +315,8 @@ intel_crtc_destroy_state(struct drm_crtc *crtc, __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi); intel_crtc_free_hw_state(crtc_state); + if (crtc_state->dp_tunnel_ref.tunnel) + drm_dp_tunnel_ref_put(&crtc_state->dp_tunnel_ref); kfree(crtc_state); } @@ -346,6 +352,8 @@ void intel_atomic_state_clear(struct drm_atomic_state *s) /* state->internal not reset on purpose */ state->dpll_set = state->modeset = false; + + intel_dp_tunnel_atomic_cleanup_inherited_state(state); } struct intel_crtc_state * diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index b9f985a5e705b..46b27a32c8640 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -33,6 +33,7 @@ #include #include +#include #include #include #include @@ -73,6 +74,7 @@ #include "intel_dp.h" #include "intel_dp_link_training.h" #include "intel_dp_mst.h" +#include "intel_dp_tunnel.h" #include "intel_dpll.h" #include "intel_dpll_mgr.h" #include "intel_dpt.h" @@ -4490,6 +4492,8 @@ copy_bigjoiner_crtc_state_modeset(struct intel_atomic_state *state, saved_state->crc_enabled = slave_crtc_state->crc_enabled; intel_crtc_free_hw_state(slave_crtc_state); + if (slave_crtc_state->dp_tunnel_ref.tunnel) + drm_dp_tunnel_ref_put(&slave_crtc_state->dp_tunnel_ref); memcpy(slave_crtc_state, saved_state, sizeof(*slave_crtc_state)); kfree(saved_state); @@ -4505,6 +4509,10 @@ copy_bigjoiner_crtc_state_modeset(struct intel_atomic_state *state, &master_crtc_state->hw.adjusted_mode); slave_crtc_state->hw.scaling_filter = master_crtc_state->hw.scaling_filter; + if (master_crtc_state->dp_tunnel_ref.tunnel) + drm_dp_tunnel_ref_get(master_crtc_state->dp_tunnel_ref.tunnel, + &slave_crtc_state->dp_tunnel_ref); + copy_bigjoiner_crtc_state_nomodeset(state, slave_crtc); slave_crtc_state->uapi.mode_changed = master_crtc_state->uapi.mode_changed; @@ -4533,6 +4541,13 @@ 
intel_crtc_prepare_cleared_state(struct intel_atomic_state *state, /* free the old crtc_state->hw members */ intel_crtc_free_hw_state(crtc_state); + if (crtc_state->dp_tunnel_ref.tunnel) { + drm_dp_tunnel_atomic_set_stream_bw(&state->base, + crtc_state->dp_tunnel_ref.tunnel, + crtc->pipe, 0); + drm_dp_tunnel_ref_put(&crtc_state->dp_tunnel_ref); + } + /* FIXME: before the switch to atomic started, a new pipe_config was * kzalloc'd. Code that depends on any field being zero should be * fixed, so that the crtc_state can be safely duplicated. For now, @@ -5374,6 +5389,10 @@ static int intel_modeset_pipe(struct intel_atomic_state *state, if (ret) return ret; + ret = intel_dp_tunnel_atomic_add_state_for_crtc(state, crtc); + if (ret) + return ret; + ret = intel_dp_mst_add_topology_state_for_crtc(state, crtc); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.c b/drivers/gpu/drm/i915/display/intel_link_bw.c index 9c6d35a405a18..5b539ba996ddf 100644 --- a/drivers/gpu/drm/i915/display/intel_link_bw.c +++ b/drivers/gpu/drm/i915/display/intel_link_bw.c @@ -8,6 +8,7 @@ #include "intel_atomic.h" #include "intel_display_types.h" #include "intel_dp_mst.h" +#include "intel_dp_tunnel.h" #include "intel_fdi.h" #include "intel_link_bw.h" @@ -149,6 +150,10 @@ static int check_all_link_config(struct intel_atomic_state *state, if (ret) return ret; + ret = intel_dp_tunnel_atomic_check_link(state, limits); + if (ret) + return ret; + ret = intel_fdi_atomic_check_link(state, limits); if (ret) return ret; From patchwork Tue Jan 23 13:26:46 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: [37/44] drm/i915/dp: Account for tunnel BW limit in intel_dp_max_link_data_rate() From: Imre Deak X-Patchwork-Id: 575599 Message-Id: <20240123132653.413364-38-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:46 +0200 Take any link BW limitation into account in intel_dp_max_link_data_rate(). Such a limitation can be due to multiple displays on (Thunderbolt) links with DP tunnels sharing the link BW. Signed-off-by: Imre Deak --- drivers/gpu/drm/i915/display/intel_dp.c | 32 +++++++++++++++++++++---- 1 file changed, 28 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 323475569ee7f..78dfe8be6031d 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -63,6 +63,7 @@ #include "intel_dp_hdcp.h" #include "intel_dp_link_training.h" #include "intel_dp_mst.h" +#include "intel_dp_tunnel.h" #include "intel_dpio_phy.h" #include "intel_dpll.h" #include "intel_fifo_underrun.h" @@ -152,6 +153,22 @@ int intel_dp_link_symbol_clock(int rate) return DIV_ROUND_CLOSEST(rate * 10, intel_dp_link_symbol_size(rate)); } +static int max_dprx_rate(struct intel_dp *intel_dp) +{ + if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp)) + return drm_dp_tunnel_max_dprx_rate(intel_dp->tunnel); + + return drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]); +} + +static int max_dprx_lane_count(struct intel_dp *intel_dp) +{ + if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp)) + return drm_dp_tunnel_max_dprx_lane_count(intel_dp->tunnel); + + return drm_dp_max_lane_count(intel_dp->dpcd); +} + static void intel_dp_set_default_sink_rates(struct intel_dp *intel_dp) { intel_dp->sink_rates[0] = 162000; @@ -180,7 +197,7 @@ static void intel_dp_set_dpcd_sink_rates(struct intel_dp *intel_dp) /* * Sink rates for 8b/10b. 
*/ - max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]); + max_rate = max_dprx_rate(intel_dp); max_lttpr_rate = drm_dp_lttpr_max_link_rate(intel_dp->lttpr_common_caps); if (max_lttpr_rate) max_rate = min(max_rate, max_lttpr_rate); @@ -259,7 +276,7 @@ static void intel_dp_set_max_sink_lane_count(struct intel_dp *intel_dp) struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct intel_encoder *encoder = &intel_dig_port->base; - intel_dp->max_sink_lane_count = drm_dp_max_lane_count(intel_dp->dpcd); + intel_dp->max_sink_lane_count = max_dprx_lane_count(intel_dp); switch (intel_dp->max_sink_lane_count) { case 1: @@ -389,14 +406,21 @@ int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16, * @max_dprx_rate: Maximum data rate of the DPRX * @max_dprx_lanes: Maximum lane count of the DPRX * - * Calculate the maximum data rate for the provided link parameters. + * Calculate the maximum data rate for the provided link parameters taking into + * account any BW limitations by a DP tunnel attached to @intel_dp. * * Returns the maximum data rate in kBps units. */ int intel_dp_max_link_data_rate(struct intel_dp *intel_dp, int max_dprx_rate, int max_dprx_lanes) { - return drm_dp_max_dprx_data_rate(max_dprx_rate, max_dprx_lanes); + int max_rate = drm_dp_max_dprx_data_rate(max_dprx_rate, max_dprx_lanes); + + if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp)) + max_rate = min(max_rate, + drm_dp_tunnel_available_bw(intel_dp->tunnel)); + + return max_rate; } bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp) From patchwork Tue Jan 23 13:26:47 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: [38/44] drm/i915/dp: Compute DP tunnel BW during encoder state computation From: Imre Deak X-Patchwork-Id: 575601 Message-Id: <20240123132653.413364-39-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:47 +0200 Compute the BW required through a DP tunnel on links where such tunnels were detected, and add the corresponding atomic state during a modeset.
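The hook runs at the end of the DP and DP MST encoders' compute_config step. A simplified sketch of what the call boils down to when BW allocation is enabled (the helper itself was added earlier in this series; the actual call sites are in the diff below):

/* In intel_dp_compute_config() / intel_dp_mst_compute_config(): */
intel_dp_tunnel_atomic_compute_stream_bw(state, intel_dp, connector, pipe_config);

/* For a tunnel with BW allocation enabled this roughly amounts to: */
int required_rate = intel_dp_config_required_rate(crtc_state);

drm_dp_tunnel_atomic_set_stream_bw(&state->base, intel_dp->tunnel,
				   crtc->pipe, required_rate);
drm_dp_tunnel_ref_get(intel_dp->tunnel, &crtc_state->dp_tunnel_ref);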
Signed-off-by: Imre Deak --- drivers/gpu/drm/i915/display/intel_dp.c | 16 +++++++++++++--- drivers/gpu/drm/i915/display/intel_dp_mst.c | 13 +++++++++++++ 2 files changed, 26 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 78dfe8be6031d..6968fdb7ffcdf 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -2880,6 +2880,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state); struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; struct intel_dp *intel_dp = enc_to_intel_dp(encoder); const struct drm_display_mode *fixed_mode; @@ -2980,6 +2981,9 @@ intel_dp_compute_config(struct intel_encoder *encoder, intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state); intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state); + intel_dp_tunnel_atomic_compute_stream_bw(state, intel_dp, connector, + pipe_config); + return 0; } @@ -6087,6 +6091,15 @@ static int intel_dp_connector_atomic_check(struct drm_connector *conn, return ret; } + if (!intel_connector_needs_modeset(state, conn)) + return 0; + + ret = intel_dp_tunnel_atomic_check_state(state, + intel_dp, + intel_conn); + if (ret) + return ret; + /* * We don't enable port sync on BDW due to missing w/as and * due to not having adjusted the modeset sequence appropriately. @@ -6094,9 +6107,6 @@ static int intel_dp_connector_atomic_check(struct drm_connector *conn, if (DISPLAY_VER(dev_priv) < 9) return 0; - if (!intel_connector_needs_modeset(state, conn)) - return 0; - if (conn->has_tile) { ret = intel_modeset_tile_group(state, conn->tile_group->id); if (ret) diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 520393dc8b453..cbfab3173b9ef 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -42,6 +42,7 @@ #include "intel_dp.h" #include "intel_dp_hdcp.h" #include "intel_dp_mst.h" +#include "intel_dp_tunnel.h" #include "intel_dpio_phy.h" #include "intel_hdcp.h" #include "intel_hotplug.h" @@ -523,6 +524,7 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder, struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state); struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_dp *intel_dp = &intel_mst->primary->dp; const struct intel_connector *connector = @@ -619,6 +621,9 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder, intel_psr_compute_config(intel_dp, pipe_config, conn_state); + intel_dp_tunnel_atomic_compute_stream_bw(state, intel_dp, connector, + pipe_config); + return 0; } @@ -876,6 +881,14 @@ intel_dp_mst_atomic_check(struct drm_connector *connector, if (ret) return ret; + if (intel_connector_needs_modeset(state, connector)) { + ret = intel_dp_tunnel_atomic_check_state(state, + intel_connector->mst_port, + intel_connector); + if (ret) + return ret; + } + return drm_dp_atomic_release_time_slots(&state->base, &intel_connector->mst_port->mst_mgr, intel_connector->port); From patchwork Tue Jan 23 13:26:48 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: 
[39/44] drm/i915/dp: Allocate/free DP tunnel BW in the encoder enable/disable hooks From: Imre Deak X-Patchwork-Id: 575621 Message-Id: <20240123132653.413364-40-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:48 +0200 Allocate and free the DP tunnel BW required by a stream while enabling/disabling the stream during a modeset. Signed-off-by: Imre Deak --- drivers/gpu/drm/i915/display/g4x_dp.c | 28 ++++++++++++++++++++++++ drivers/gpu/drm/i915/display/intel_ddi.c | 7 ++++++ 2 files changed, 35 insertions(+) diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c index dfe0b07a122d1..1e498e1510adf 100644 --- a/drivers/gpu/drm/i915/display/g4x_dp.c +++ b/drivers/gpu/drm/i915/display/g4x_dp.c @@ -19,6 +19,7 @@ #include "intel_dp.h" #include "intel_dp_aux.h" #include "intel_dp_link_training.h" +#include "intel_dp_tunnel.h" #include "intel_dpio_phy.h" #include "intel_fifo_underrun.h" #include "intel_hdmi.h" @@ -729,6 +730,24 @@ static void vlv_enable_dp(struct intel_atomic_state *state, encoder->audio_enable(encoder, pipe_config, conn_state); } +static void g4x_dp_pre_pll_enable(struct intel_atomic_state *state, + struct intel_encoder *encoder, + const struct intel_crtc_state *new_crtc_state, + const struct drm_connector_state *new_conn_state) +{ + intel_dp_tunnel_atomic_alloc_bw(state, encoder, + new_crtc_state, new_conn_state); +} + +static void g4x_dp_post_pll_disable(struct intel_atomic_state *state, + struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) +{ + intel_dp_tunnel_atomic_free_bw(state, encoder, + old_crtc_state, old_conn_state); +} + static void g4x_pre_enable_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, @@ -762,6 +781,8 @@ static void vlv_dp_pre_pll_enable(struct intel_atomic_state *state, intel_dp_prepare(encoder, pipe_config); vlv_phy_pre_pll_enable(encoder, pipe_config); + + g4x_dp_pre_pll_enable(state, encoder, pipe_config, conn_state); } static void chv_pre_enable_dp(struct intel_atomic_state *state, @@ -785,6 +806,8 @@ static void chv_dp_pre_pll_enable(struct intel_atomic_state *state, intel_dp_prepare(encoder, pipe_config); chv_phy_pre_pll_enable(encoder, pipe_config); + + g4x_dp_pre_pll_enable(state, encoder, pipe_config, conn_state); } static void chv_dp_post_pll_disable(struct intel_atomic_state *state, @@ -792,6 +815,8 @@ static void chv_dp_post_pll_disable(struct intel_atomic_state *state, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { + g4x_dp_post_pll_disable(state, encoder, old_crtc_state, old_conn_state); + chv_phy_post_pll_disable(encoder, old_crtc_state); } @@ -1349,11 +1374,14 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv, intel_encoder->enable = vlv_enable_dp; intel_encoder->disable = vlv_disable_dp; intel_encoder->post_disable = vlv_post_disable_dp; + intel_encoder->post_pll_disable = g4x_dp_post_pll_disable; } else { + intel_encoder->pre_pll_enable = g4x_dp_pre_pll_enable; intel_encoder->pre_enable = g4x_pre_enable_dp; intel_encoder->enable = g4x_enable_dp; intel_encoder->disable = g4x_disable_dp; intel_encoder->post_disable = g4x_post_disable_dp; + intel_encoder->post_pll_disable = g4x_dp_post_pll_disable; } intel_encoder->audio_enable = g4x_dp_audio_enable; intel_encoder->audio_disable = g4x_dp_audio_disable; diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c 
b/drivers/gpu/drm/i915/display/intel_ddi.c index 922194b957be2..aa6e7da08fbce 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -54,6 +54,7 @@ #include "intel_dp_aux.h" #include "intel_dp_link_training.h" #include "intel_dp_mst.h" +#include "intel_dp_tunnel.h" #include "intel_dpio_phy.h" #include "intel_dsi.h" #include "intel_fdi.h" @@ -3141,6 +3142,9 @@ static void intel_ddi_post_pll_disable(struct intel_atomic_state *state, main_link_aux_power_domain_put(dig_port, old_crtc_state); + intel_dp_tunnel_atomic_free_bw(state, encoder, + old_crtc_state, old_conn_state); + if (is_tc_port) intel_tc_port_put_link(dig_port); } @@ -3480,6 +3484,9 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state, intel_ddi_update_active_dpll(state, encoder, master_crtc); } + intel_dp_tunnel_atomic_alloc_bw(state, encoder, + crtc_state, conn_state); + main_link_aux_power_domain_get(dig_port, crtc_state); if (is_tc_port && !intel_tc_port_in_tbt_alt_mode(dig_port)) From patchwork Tue Jan 23 13:26:49 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: [40/44] drm/i915/dp: Handle DP tunnel IRQs From: Imre Deak X-Patchwork-Id: 575602 Message-Id: <20240123132653.413364-41-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:49 +0200 Handle DP tunnel IRQs a sink (or rather a BW management component like the Thunderbolt Connection Manager) raises to signal the completion of a BW request by the driver, or to signal any state change related to the link BW. Signed-off-by: Imre Deak --- drivers/gpu/drm/i915/display/intel_dp.c | 37 +++++++++++++++++++------ include/drm/display/drm_dp.h | 1 + 2 files changed, 29 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 6968fdb7ffcdf..8ebfb039000f6 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -4911,13 +4911,15 @@ static bool intel_dp_mst_link_status(struct intel_dp *intel_dp) * - %true if pending interrupts were serviced (or no interrupts were * pending) w/o detecting an error condition. * - %false if an error condition - like AUX failure or a loss of link - is - * detected, which needs servicing from the hotplug work. + * detected, or another condition - like a DP tunnel BW state change - needs + * servicing from the hotplug work. 
*/ static bool intel_dp_check_mst_status(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); bool link_ok = true; + bool reprobe_needed = false; drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0); @@ -4944,6 +4946,13 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp) intel_dp_mst_hpd_irq(intel_dp, esi, ack); + if (esi[3] & DP_TUNNELING_IRQ) { + if (drm_dp_tunnel_handle_irq(i915->display.dp_tunnel_mgr, + &intel_dp->aux)) + reprobe_needed = true; + ack[3] |= DP_TUNNELING_IRQ; + } + if (!memchr_inv(ack, 0, sizeof(ack))) break; @@ -4954,7 +4963,7 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp) drm_dp_mst_hpd_irq_send_new_request(&intel_dp->mst_mgr); } - return link_ok; + return link_ok && !reprobe_needed; } static void @@ -5330,23 +5339,32 @@ static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp) drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n"); } -static void intel_dp_check_link_service_irq(struct intel_dp *intel_dp) +static bool intel_dp_check_link_service_irq(struct intel_dp *intel_dp) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + bool reprobe_needed = false; u8 val; if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) - return; + return false; if (drm_dp_dpcd_readb(&intel_dp->aux, DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val) - return; + return false; + + if ((val & DP_TUNNELING_IRQ) && + drm_dp_tunnel_handle_irq(i915->display.dp_tunnel_mgr, + &intel_dp->aux)) + reprobe_needed = true; if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1) - return; + return reprobe_needed; if (val & HDMI_LINK_STATUS_CHANGED) intel_dp_handle_hdmi_link_status_change(intel_dp); + + return reprobe_needed; } /* @@ -5367,6 +5385,7 @@ intel_dp_short_pulse(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u8 old_sink_count = intel_dp->sink_count; + bool reprobe_needed = false; bool ret; /* @@ -5389,7 +5408,7 @@ intel_dp_short_pulse(struct intel_dp *intel_dp) } intel_dp_check_device_service_irq(intel_dp); - intel_dp_check_link_service_irq(intel_dp); + reprobe_needed = intel_dp_check_link_service_irq(intel_dp); /* Handle CEC interrupts, if any */ drm_dp_cec_irq(&intel_dp->aux); @@ -5416,10 +5435,10 @@ intel_dp_short_pulse(struct intel_dp *intel_dp) * FIXME get rid of the ad-hoc phy test modeset code * and properly incorporate it into the normal modeset. */ - return false; + reprobe_needed = true; } - return true; + return !reprobe_needed; } /* XXX this is probably wrong for multiple downstream ports */ diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h index 8bfd5d007be8d..4891bd916d26a 100644 --- a/include/drm/display/drm_dp.h +++ b/include/drm/display/drm_dp.h @@ -1081,6 +1081,7 @@ # define STREAM_STATUS_CHANGED (1 << 2) # define HDMI_LINK_STATUS_CHANGED (1 << 3) # define CONNECTED_OFF_ENTRY_REQUESTED (1 << 4) +# define DP_TUNNELING_IRQ (1 << 5) #define DP_PSR_ERROR_STATUS 0x2006 /* XXX 1.2? 
*/ # define DP_PSR_LINK_CRC_ERROR (1 << 0) From patchwork Tue Jan 23 13:26:50 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: [41/44] drm/i915/dp: Call intel_dp_sync_state() always for DDI DP encoders From: Imre Deak X-Patchwork-Id: 575603 Message-Id: <20240123132653.413364-42-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:50 +0200 A follow-up change will need to resume DP tunnels during system resume, so call intel_dp_sync_state() always for DDI DP encoders, so that this function can resume the tunnels for all DP connectors. Signed-off-by: Imre Deak --- drivers/gpu/drm/i915/display/intel_ddi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index aa6e7da08fbce..1e26e62b82d48 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -4131,7 +4131,7 @@ static void intel_ddi_sync_state(struct intel_encoder *encoder, intel_tc_port_sanitize_mode(enc_to_dig_port(encoder), crtc_state); - if (crtc_state && intel_crtc_has_dp_encoder(crtc_state)) + if (intel_encoder_is_dp(encoder)) intel_dp_sync_state(encoder, crtc_state); } From patchwork Tue Jan 23 13:26:51 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: [42/44] drm/i915/dp: Suspend/resume DP tunnels From: Imre Deak X-Patchwork-Id: 575622 Message-Id: <20240123132653.413364-43-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:51 +0200 Suspend and resume DP tunnels during system suspend/resume, disabling the BW allocation mode during suspend and re-enabling it after resume. This reflects the link's BW management component (the Thunderbolt CM) disabling the BW allocation mode during suspend. Before any BW requests can be sent, the driver must read the sink's DPRX capabilities, since the BW manager requires this information and snoops for it on AUX; ensure this read takes place. Signed-off-by: Imre Deak --- drivers/gpu/drm/i915/display/intel_dp.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 8ebfb039000f6..bc138a54f8d7b 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -36,6 +36,7 @@ #include #include +#include #include #include #include @@ -3320,18 +3321,21 @@ void intel_dp_sync_state(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - - if (!crtc_state) - return; + bool dpcd_updated = false; /* * Don't clobber DPCD if it's been already read out during output * setup (eDP) or detect.
*/ - if (intel_dp->dpcd[DP_DPCD_REV] == 0) + if (crtc_state && intel_dp->dpcd[DP_DPCD_REV] == 0) { intel_dp_get_dpcd(intel_dp); + dpcd_updated = true; + } - intel_dp_reset_max_link_params(intel_dp); + intel_dp_tunnel_resume(intel_dp, dpcd_updated); + + if (crtc_state) + intel_dp_reset_max_link_params(intel_dp); } bool intel_dp_initial_fastset_check(struct intel_encoder *encoder, @@ -5973,6 +5977,8 @@ void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder); intel_pps_vdd_off_sync(intel_dp); + + intel_dp_tunnel_suspend(intel_dp); } void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder) From patchwork Tue Jan 23 13:26:52 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: [43/44] drm/i915/dp: Enable DP tunnel BW allocation mode From: Imre Deak X-Patchwork-Id: 575606 Message-Id: <20240123132653.413364-44-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:52 +0200 Detect DP tunnels and enable the BW allocation mode on them. Send a hotplug notification to userspace in response to a BW change. Signed-off-by: Imre Deak --- .../drm/i915/display/intel_display_driver.c | 20 +++++++++++++++---- drivers/gpu/drm/i915/display/intel_dp.c | 14 +++++++++++-- 2 files changed, 28 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c index ecf9cb74734b6..62987b8427f7b 100644 --- a/drivers/gpu/drm/i915/display/intel_display_driver.c +++ b/drivers/gpu/drm/i915/display/intel_display_driver.c @@ -35,6 +35,7 @@ #include "intel_dkl_phy.h" #include "intel_dmc.h" #include "intel_dp.h" +#include "intel_dp_tunnel.h" #include "intel_dpll.h" #include "intel_dpll_mgr.h" #include "intel_fb.h" @@ -435,10 +436,8 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915) for_each_pipe(i915, pipe) { ret = intel_crtc_init(i915, pipe); - if (ret) { - intel_mode_config_cleanup(i915); - return ret; - } + if (ret) + goto err_mode_config; } intel_plane_possible_crtcs_init(i915); @@ -460,6 +459,10 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915) intel_vga_disable(i915); intel_setup_outputs(i915); + ret = intel_dp_tunnel_mgr_init(i915); + if (ret) + goto err_hdcp; + intel_display_driver_disable_user_access(i915); drm_modeset_lock_all(dev); @@ -482,6 +485,13 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915) ilk_wm_sanitize(i915); return 0; + +err_hdcp: + intel_hdcp_component_fini(i915); +err_mode_config: + intel_mode_config_cleanup(i915); + + return ret; } /* part #3: call after gem init */ @@ -598,6 +608,8 @@ void intel_display_driver_remove_noirq(struct drm_i915_private *i915) intel_mode_config_cleanup(i915); + intel_dp_tunnel_mgr_cleanup(i915); + intel_overlay_cleanup(i915); intel_gmbus_teardown(i915); diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index bc138a54f8d7b..6133266d78276 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -5752,6 +5752,7 @@ intel_dp_detect(struct drm_connector *connector, struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct intel_encoder *encoder = &dig_port->base; enum drm_connector_status status; + int ret; drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); @@ -5787,9 +5788,18 @@ intel_dp_detect(struct drm_connector *connector, 
intel_dp->is_mst); } + intel_dp_tunnel_disconnect(intel_dp); + goto out; } + ret = intel_dp_tunnel_detect(intel_dp, ctx); + if (ret == -EDEADLK) + return ret; + + if (ret == 1) + intel_connector->base.epoch_counter++; + intel_dp_detect_dsc_caps(intel_dp, intel_connector); intel_dp_configure_mst(intel_dp); @@ -5820,8 +5830,6 @@ intel_dp_detect(struct drm_connector *connector, * with an IRQ_HPD, so force a link status check. */ if (!intel_dp_is_edp(intel_dp)) { - int ret; - ret = intel_dp_retrain_link(encoder, ctx); if (ret) return ret; @@ -5961,6 +5969,8 @@ void intel_dp_encoder_flush_work(struct drm_encoder *encoder) intel_dp_mst_encoder_cleanup(dig_port); + intel_dp_tunnel_destroy(intel_dp); + intel_pps_vdd_off_sync(intel_dp); /* From patchwork Tue Jan 23 13:26:53 2024 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: [44/44] drm/i915/dp: Read DPRX for all short/long HPD pulses From: Imre Deak X-Patchwork-Id: 575607 Message-Id: <20240123132653.413364-45-imre.deak@intel.com> To: intel-gfx-trybot@lists.freedesktop.org Date: Tue, 23 Jan 2024 15:26:53 +0200 Signed-off-by: Imre Deak --- drivers/gpu/drm/i915/display/intel_dp.c | 53 ++++++++++++------------- 1 file changed, 25 insertions(+), 28 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 6133266d78276..ab67af327bf10 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -5388,28 +5388,7 @@ static bool intel_dp_short_pulse(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - u8 old_sink_count = intel_dp->sink_count; bool reprobe_needed = false; - bool ret; - - /* - * Clearing compliance test variables to allow capturing - * of values for next automated test request. - */ - memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); - - /* - * Now read the DPCD to see if it's actually running - * If the current value of sink count doesn't match with - * the value that was stored earlier or dpcd read failed - * we need to do full detection - */ - ret = intel_dp_get_dpcd(intel_dp); - - if ((old_sink_count != intel_dp->sink_count) || !ret) { - /* No need to proceed if we are going to do full detect */ - return false; - } intel_dp_check_device_service_irq(intel_dp); reprobe_needed = intel_dp_check_link_service_irq(intel_dp); @@ -6198,6 +6177,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); struct intel_dp *intel_dp = &dig_port->dp; + u8 old_sink_count = intel_dp->sink_count; + bool reprobe_needed = false; if (dig_port->base.type == INTEL_OUTPUT_EDP && (long_hpd || !intel_pps_have_panel_power_or_vdd(intel_dp))) { @@ -6220,19 +6201,35 @@ intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd) dig_port->base.base.name, long_hpd ? "long" : "short"); + /* + * Clearing compliance test variables to allow capturing + * of values for next automated test request. 
+ */ + memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); + + /* + * Now read the DPCD to see if it's actually running + * If the current value of sink count doesn't match with + * the value that was stored earlier or dpcd read failed + * we need to do full detection + */ + if (!intel_dp_get_dpcd(intel_dp) || + (!intel_dp->is_mst && old_sink_count != intel_dp->sink_count)) { + /* No need to proceed if we are going to do full detect */ + reprobe_needed = true; + } + if (long_hpd) { intel_dp->reset_link_params = true; - return IRQ_NONE; - } - - if (intel_dp->is_mst) { + reprobe_needed = true; + } else if (intel_dp->is_mst) { if (!intel_dp_check_mst_status(intel_dp)) - return IRQ_NONE; + reprobe_needed = true; } else if (!intel_dp_short_pulse(intel_dp)) { - return IRQ_NONE; + reprobe_needed = true; } - return IRQ_HANDLED; + return reprobe_needed ? IRQ_NONE : IRQ_HANDLED; } static bool _intel_dp_is_port_edp(struct drm_i915_private *dev_priv,