rtw89: update Realtek's rtw89 driver

This version is based on
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
7d0a66e4bb9081d75c82ec4957c50034cb0ea449 (tag: v6.18).

Sponsored by:	The FreeBSD Foundation
Author: Bjoern A. Zeeb
Date:   2025-12-05 20:48:02 +00:00
Commit: 4fbdf5199e
Parent: b35044b38f
43 changed files with 2558 additions and 394 deletions

chan.c (11 changed lines)

@ -281,6 +281,7 @@ void rtw89_entity_init(struct rtw89_dev *rtwdev)
{
struct rtw89_hal *hal = &rtwdev->hal;
struct rtw89_entity_mgnt *mgnt = &hal->entity_mgnt;
int i, j;
hal->entity_pause = false;
bitmap_zero(hal->entity_map, NUM_OF_RTW89_CHANCTX);
@ -289,6 +290,11 @@ void rtw89_entity_init(struct rtw89_dev *rtwdev)
INIT_LIST_HEAD(&mgnt->active_list);
for (i = 0; i < RTW89_MAX_INTERFACE_NUM; i++) {
for (j = 0; j < __RTW89_MLD_MAX_LINK_NUM; j++)
mgnt->chanctx_tbl[i][j] = RTW89_CHANCTX_IDLE;
}
rtw89_config_default_chandef(rtwdev);
}
@ -353,7 +359,7 @@ static void rtw89_normalize_link_chanctx(struct rtw89_dev *rtwdev,
const struct rtw89_chan *__rtw89_mgnt_chan_get(struct rtw89_dev *rtwdev,
const char *caller_message,
u8 link_index)
u8 link_index, bool nullchk)
{
struct rtw89_hal *hal = &rtwdev->hal;
struct rtw89_entity_mgnt *mgnt = &hal->entity_mgnt;
@ -400,6 +406,9 @@ const struct rtw89_chan *__rtw89_mgnt_chan_get(struct rtw89_dev *rtwdev,
return rtw89_chan_get(rtwdev, chanctx_idx);
dflt:
if (unlikely(nullchk))
return NULL;
rtw89_debug(rtwdev, RTW89_DBG_CHAN,
"%s (%s): prefetch NULL on link index %u\n",
__func__, caller_message ?: "", link_index);

chan.h (10 changed lines)

@ -180,10 +180,16 @@ void rtw89_chanctx_proceed(struct rtw89_dev *rtwdev,
const struct rtw89_chan *__rtw89_mgnt_chan_get(struct rtw89_dev *rtwdev,
const char *caller_message,
u8 link_index);
u8 link_index, bool nullchk);
#define rtw89_mgnt_chan_get(rtwdev, link_index) \
__rtw89_mgnt_chan_get(rtwdev, __func__, link_index)
__rtw89_mgnt_chan_get(rtwdev, __func__, link_index, false)
static inline const struct rtw89_chan *
rtw89_mgnt_chan_get_or_null(struct rtw89_dev *rtwdev, u8 link_index)
{
return __rtw89_mgnt_chan_get(rtwdev, NULL, link_index, true);
}
struct rtw89_mcc_links_info {
struct rtw89_vif_link *links[NUM_OF_RTW89_MCC_ROLES];
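For callers of the chan.h API above, the only difference between the two accessors is the NULL behaviour: rtw89_mgnt_chan_get() never returns NULL (on a miss it logs the "prefetch NULL" debug message and falls back to a default channel), while the new rtw89_mgnt_chan_get_or_null() lets the caller detect the miss. A minimal usage sketch, not taken from the commit:

	const struct rtw89_chan *chan;

	/* Existing behaviour: always get a usable channel, possibly the default. */
	chan = rtw89_mgnt_chan_get(rtwdev, link_index);

	/* New helper: bail out early when no chanctx is bound to this link. */
	chan = rtw89_mgnt_chan_get_or_null(rtwdev, link_index);
	if (!chan)
		return;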

coex.c (5 changed lines)

@ -93,7 +93,7 @@ static const struct rtw89_btc_fbtc_slot s_def[] = {
[CXST_E2G] = __DEF_FBTC_SLOT(5, 0xea5a5a5a, SLOT_MIX),
[CXST_E5G] = __DEF_FBTC_SLOT(5, 0xffffffff, SLOT_ISO),
[CXST_EBT] = __DEF_FBTC_SLOT(5, 0xe5555555, SLOT_MIX),
[CXST_ENULL] = __DEF_FBTC_SLOT(5, 0xaaaaaaaa, SLOT_ISO),
[CXST_ENULL] = __DEF_FBTC_SLOT(5, 0x55555555, SLOT_MIX),
[CXST_WLK] = __DEF_FBTC_SLOT(250, 0xea5a5a5a, SLOT_MIX),
[CXST_W1FDD] = __DEF_FBTC_SLOT(50, 0xffffffff, SLOT_ISO),
[CXST_B1FDD] = __DEF_FBTC_SLOT(50, 0xffffdfff, SLOT_ISO),
@ -4153,6 +4153,7 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type)
s_def[CXST_EBT].cxtbl, s_def[CXST_EBT].cxtype);
_slot_set_le(btc, CXST_ENULL, s_def[CXST_ENULL].dur,
s_def[CXST_ENULL].cxtbl, s_def[CXST_ENULL].cxtype);
_slot_set_dur(btc, CXST_EBT, dur_2);
break;
case BTC_CXP_OFFE_DEF2:
_slot_set(btc, CXST_E2G, 20, cxtbl[1], SLOT_ISO);
@ -4162,6 +4163,7 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type)
s_def[CXST_EBT].cxtbl, s_def[CXST_EBT].cxtype);
_slot_set_le(btc, CXST_ENULL, s_def[CXST_ENULL].dur,
s_def[CXST_ENULL].cxtbl, s_def[CXST_ENULL].cxtype);
_slot_set_dur(btc, CXST_EBT, dur_2);
break;
case BTC_CXP_OFFE_2GBWMIXB:
if (a2dp->exist)
@ -4170,6 +4172,7 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type)
_slot_set(btc, CXST_E2G, 5, tbl_w1, SLOT_MIX);
_slot_set_le(btc, CXST_EBT, cpu_to_le16(40),
s_def[CXST_EBT].cxtbl, s_def[CXST_EBT].cxtype);
_slot_set_dur(btc, CXST_EBT, dur_2);
break;
case BTC_CXP_OFFE_WL: /* for 4-way */
_slot_set(btc, CXST_E2G, 5, cxtbl[1], SLOT_MIX);

core.c (686 changed lines)

@ -2,6 +2,7 @@
/* Copyright(c) 2019-2020 Realtek Corporation
*/
#include <linux/ip.h>
#include <linux/sort.h>
#include <linux/udp.h>
#include "cam.h"
@ -272,17 +273,18 @@ rtw89_get_6ghz_span(struct rtw89_dev *rtwdev, u32 center_freq)
return NULL;
}
bool rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate, u16 *bitrate)
bool rtw89_legacy_rate_to_bitrate(struct rtw89_dev *rtwdev, u8 legacy_rate, u16 *bitrate)
{
struct ieee80211_rate rate;
const struct ieee80211_rate *rate;
if (unlikely(rpt_rate >= ARRAY_SIZE(rtw89_bitrates))) {
rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "invalid rpt rate %d\n", rpt_rate);
if (unlikely(legacy_rate >= ARRAY_SIZE(rtw89_bitrates))) {
rtw89_debug(rtwdev, RTW89_DBG_UNEXP,
"invalid legacy rate %d\n", legacy_rate);
return false;
}
rate = rtw89_bitrates[rpt_rate];
*bitrate = rate.bitrate;
rate = &rtw89_bitrates[legacy_rate];
*bitrate = rate->bitrate;
return true;
}
@ -697,6 +699,66 @@ static void rtw89_core_tx_update_llc_hdr(struct rtw89_dev *rtwdev,
desc_info->hdr_llc_len >>= 1; /* in unit of 2 bytes */
}
u8 rtw89_core_get_ch_dma(struct rtw89_dev *rtwdev, u8 qsel)
{
switch (qsel) {
default:
rtw89_warn(rtwdev, "Cannot map qsel to dma: %d\n", qsel);
fallthrough;
case RTW89_TX_QSEL_BE_0:
case RTW89_TX_QSEL_BE_1:
case RTW89_TX_QSEL_BE_2:
case RTW89_TX_QSEL_BE_3:
return RTW89_TXCH_ACH0;
case RTW89_TX_QSEL_BK_0:
case RTW89_TX_QSEL_BK_1:
case RTW89_TX_QSEL_BK_2:
case RTW89_TX_QSEL_BK_3:
return RTW89_TXCH_ACH1;
case RTW89_TX_QSEL_VI_0:
case RTW89_TX_QSEL_VI_1:
case RTW89_TX_QSEL_VI_2:
case RTW89_TX_QSEL_VI_3:
return RTW89_TXCH_ACH2;
case RTW89_TX_QSEL_VO_0:
case RTW89_TX_QSEL_VO_1:
case RTW89_TX_QSEL_VO_2:
case RTW89_TX_QSEL_VO_3:
return RTW89_TXCH_ACH3;
case RTW89_TX_QSEL_B0_MGMT:
return RTW89_TXCH_CH8;
case RTW89_TX_QSEL_B0_HI:
return RTW89_TXCH_CH9;
case RTW89_TX_QSEL_B1_MGMT:
return RTW89_TXCH_CH10;
case RTW89_TX_QSEL_B1_HI:
return RTW89_TXCH_CH11;
}
}
EXPORT_SYMBOL(rtw89_core_get_ch_dma);
u8 rtw89_core_get_ch_dma_v1(struct rtw89_dev *rtwdev, u8 qsel)
{
switch (qsel) {
default:
rtw89_warn(rtwdev, "Cannot map qsel to dma v1: %d\n", qsel);
fallthrough;
case RTW89_TX_QSEL_BE_0:
case RTW89_TX_QSEL_BK_0:
return RTW89_TXCH_ACH0;
case RTW89_TX_QSEL_VI_0:
case RTW89_TX_QSEL_VO_0:
return RTW89_TXCH_ACH2;
case RTW89_TX_QSEL_B0_MGMT:
case RTW89_TX_QSEL_B0_HI:
return RTW89_TXCH_CH8;
case RTW89_TX_QSEL_B1_MGMT:
case RTW89_TX_QSEL_B1_HI:
return RTW89_TXCH_CH10;
}
}
EXPORT_SYMBOL(rtw89_core_get_ch_dma_v1);
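core.h later in this diff adds a get_ch_dma chip operation plus a rtw89_chip_get_ch_dma() wrapper, and the call sites below switch to it, so each chip can pick one of the two exported mappings. An illustrative wiring sketch (the ops-table name is hypothetical, not from this commit):

	static const struct rtw89_chip_ops example_chip_ops = {
		/* ... other callbacks ... */
		.get_ch_dma = rtw89_core_get_ch_dma_v1,	/* BE/BK share ACH0, VI/VO share ACH2 */
	};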
static void
rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
@ -710,7 +772,7 @@ rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev,
u8 qsel, ch_dma;
qsel = rtw89_core_get_qsel_mgmt(rtwdev, tx_req);
ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel);
ch_dma = rtw89_chip_get_ch_dma(rtwdev, qsel);
desc_info->qsel = qsel;
desc_info->ch_dma = ch_dma;
@ -927,7 +989,7 @@ rtw89_core_tx_update_data_info(struct rtw89_dev *rtwdev,
tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
tid_indicate = rtw89_core_get_tid_indicate(rtwdev, tid);
qsel = desc_info->hiq ? RTW89_TX_QSEL_B0_HI : rtw89_core_get_qsel(rtwdev, tid);
ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel);
ch_dma = rtw89_chip_get_ch_dma(rtwdev, qsel);
desc_info->ch_dma = ch_dma;
desc_info->tid_indicate = tid_indicate;
@ -1073,42 +1135,46 @@ rtw89_core_tx_update_desc_info(struct rtw89_dev *rtwdev,
}
}
static void rtw89_tx_wait_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
tx_wait_work.work);
rtw89_tx_wait_list_clear(rtwdev);
}
void rtw89_core_tx_kick_off(struct rtw89_dev *rtwdev, u8 qsel)
{
u8 ch_dma;
ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel);
ch_dma = rtw89_chip_get_ch_dma(rtwdev, qsel);
rtw89_hci_tx_kick_off(rtwdev, ch_dma);
}
int rtw89_core_tx_kick_off_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
int qsel, unsigned int timeout)
struct rtw89_tx_wait_info *wait, int qsel,
unsigned int timeout)
{
struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
struct rtw89_tx_wait_info *wait;
unsigned long time_left;
int ret = 0;
wait = kzalloc(sizeof(*wait), GFP_KERNEL);
if (!wait) {
rtw89_core_tx_kick_off(rtwdev, qsel);
return 0;
}
init_completion(&wait->completion);
rcu_assign_pointer(skb_data->wait, wait);
lockdep_assert_wiphy(rtwdev->hw->wiphy);
rtw89_core_tx_kick_off(rtwdev, qsel);
time_left = wait_for_completion_timeout(&wait->completion,
msecs_to_jiffies(timeout));
if (time_left == 0)
ret = -ETIMEDOUT;
else if (!wait->tx_done)
ret = -EAGAIN;
rcu_assign_pointer(skb_data->wait, NULL);
kfree_rcu(wait, rcu_head);
if (time_left == 0) {
ret = -ETIMEDOUT;
list_add_tail(&wait->list, &rtwdev->tx_waits);
wiphy_delayed_work_queue(rtwdev->hw->wiphy, &rtwdev->tx_wait_work,
RTW89_TX_WAIT_WORK_TIMEOUT);
} else {
if (!wait->tx_done)
ret = -EAGAIN;
rtw89_tx_wait_release(wait);
}
return ret;
}
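The wait object is now allocated by the caller and handed through the TX path; rtw89_core_send_nullfunc() further down is the user updated by this commit. Condensed, with error handling trimmed:

	struct rtw89_tx_wait_info *wait;
	int qsel, ret;

	wait = kzalloc(sizeof(*wait), GFP_KERNEL);
	if (!wait)
		return -ENOMEM;
	init_completion(&wait->completion);
	wait->skb = skb;

	/* rtw89_core_tx_write_link() attaches wait to the skb before the HCI write. */
	ret = rtw89_core_tx_write_link(rtwdev, rtwvif_link, rtwsta_link, skb,
				       &qsel, true, wait);
	if (ret) {
		dev_kfree_skb_any(skb);
		kfree(wait);
		return ret;
	}

	/* On timeout the wait is parked on rtwdev->tx_waits and freed later by
	 * tx_wait_work; otherwise it is released right here.
	 */
	return rtw89_core_tx_kick_off_and_wait(rtwdev, skb, wait, qsel, timeout);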
@ -1157,10 +1223,12 @@ int rtw89_h2c_tx(struct rtw89_dev *rtwdev,
static int rtw89_core_tx_write_link(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link,
struct sk_buff *skb, int *qsel, bool sw_mld)
struct sk_buff *skb, int *qsel, bool sw_mld,
struct rtw89_tx_wait_info *wait)
{
struct ieee80211_sta *sta = rtwsta_link_to_sta_safe(rtwsta_link);
struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
struct rtw89_core_tx_request tx_req = {};
int ret;
@ -1177,6 +1245,8 @@ static int rtw89_core_tx_write_link(struct rtw89_dev *rtwdev,
rtw89_core_tx_update_desc_info(rtwdev, &tx_req);
rtw89_core_tx_wake(rtwdev, &tx_req);
rcu_assign_pointer(skb_data->wait, wait);
ret = rtw89_hci_tx_write(rtwdev, &tx_req);
if (ret) {
rtw89_err(rtwdev, "failed to transmit skb to HCI\n");
@ -1213,7 +1283,8 @@ int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
}
}
return rtw89_core_tx_write_link(rtwdev, rtwvif_link, rtwsta_link, skb, qsel, false);
return rtw89_core_tx_write_link(rtwdev, rtwvif_link, rtwsta_link, skb, qsel, false,
NULL);
}
static __le32 rtw89_build_txwd_body0(struct rtw89_tx_desc_info *desc_info)
@ -1835,6 +1906,10 @@ static void rtw89_core_parse_phy_status_ie00(struct rtw89_dev *rtwdev,
tmp_rpl = le32_get_bits(ie->w0, RTW89_PHY_STS_IE00_W0_RPL);
phy_ppdu->rpl_avg = tmp_rpl >> 1;
if (!phy_ppdu->hdr_2_en)
phy_ppdu->rx_path_en =
le32_get_bits(ie->w3, RTW89_PHY_STS_IE00_W3_RX_PATH_EN);
}
static void rtw89_core_parse_phy_status_ie00_v2(struct rtw89_dev *rtwdev,
@ -2221,6 +2296,435 @@ static void rtw89_vif_sync_bcn_tsf(struct rtw89_vif_link *rtwvif_link,
WRITE_ONCE(rtwvif_link->sync_bcn_tsf, le64_to_cpu(mgmt->u.beacon.timestamp));
}
static u32 rtw89_bcn_calc_min_tbtt(struct rtw89_dev *rtwdev, u32 tbtt1, u32 tbtt2)
{
struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
u32 close_bcn_intvl_th = bcn_track->close_bcn_intvl_th;
u32 tbtt_diff_th = bcn_track->tbtt_diff_th;
if (tbtt2 > tbtt1)
swap(tbtt1, tbtt2);
if (tbtt1 - tbtt2 > tbtt_diff_th)
return tbtt1;
else if (tbtt2 > close_bcn_intvl_th)
return tbtt2;
else if (tbtt1 > close_bcn_intvl_th)
return tbtt1;
else
return tbtt2;
}
static void rtw89_bcn_cfg_tbtt_offset(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link)
{
struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
u32 offset = bcn_track->tbtt_offset;
if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
const struct rtw89_port_reg *p = mac->port_base;
u32 bcnspc, val;
bcnspc = rtw89_read32_port_mask(rtwdev, rtwvif_link,
p->bcn_space, B_AX_BCN_SPACE_MASK);
val = bcnspc - (offset / 1024);
val = u32_encode_bits(val, B_AX_TBTT_SHIFT_OFST_MAG) |
B_AX_TBTT_SHIFT_OFST_SIGN;
rtw89_write16_port_mask(rtwdev, rtwvif_link, p->tbtt_shift,
B_AX_TBTT_SHIFT_OFST_MASK, val);
return;
}
rtw89_fw_h2c_tbtt_tuning(rtwdev, rtwvif_link, offset);
}
static void rtw89_bcn_update_tbtt_offset(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link)
{
struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat;
struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
u32 *tbtt_us = bcn_stat->tbtt_us;
u32 offset = tbtt_us[0];
u8 i;
for (i = 1; i < RTW89_BCN_TRACK_STAT_NR; i++)
offset = rtw89_bcn_calc_min_tbtt(rtwdev, tbtt_us[i], offset);
if (bcn_track->tbtt_offset == offset)
return;
bcn_track->tbtt_offset = offset;
rtw89_bcn_cfg_tbtt_offset(rtwdev, rtwvif_link);
}
static int cmp_u16(const void *a, const void *b)
{
return *(const u16 *)a - *(const u16 *)b;
}
static u16 _rtw89_bcn_calc_drift(u16 tbtt, u16 offset, u16 beacon_int)
{
if (tbtt < offset)
return beacon_int - offset + tbtt;
return tbtt - offset;
}
static void rtw89_bcn_calc_drift(struct rtw89_dev *rtwdev)
{
struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat;
struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
u16 offset_tu = bcn_track->tbtt_offset / 1024;
u16 *tbtt_tu = bcn_stat->tbtt_tu;
u16 *drift = bcn_stat->drift;
u8 i;
bcn_stat->tbtt_tu_min = U16_MAX;
bcn_stat->tbtt_tu_max = 0;
for (i = 0; i < RTW89_BCN_TRACK_STAT_NR; i++) {
drift[i] = _rtw89_bcn_calc_drift(tbtt_tu[i], offset_tu,
bcn_track->beacon_int);
bcn_stat->tbtt_tu_min = min(bcn_stat->tbtt_tu_min, tbtt_tu[i]);
bcn_stat->tbtt_tu_max = max(bcn_stat->tbtt_tu_max, tbtt_tu[i]);
}
sort(drift, RTW89_BCN_TRACK_STAT_NR, sizeof(*drift), cmp_u16, NULL);
}
static void rtw89_bcn_calc_distribution(struct rtw89_dev *rtwdev)
{
struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat;
struct rtw89_beacon_dist *bcn_dist = &bcn_stat->bcn_dist;
u16 lower_bound, upper_bound, outlier_count = 0;
u16 *drift = bcn_stat->drift;
u16 *bins = bcn_dist->bins;
u16 q1, q3, iqr, tmp;
u8 i;
BUILD_BUG_ON(RTW89_BCN_TRACK_STAT_NR % 4 != 0);
memset(bcn_dist, 0, sizeof(*bcn_dist));
bcn_dist->min = drift[0];
bcn_dist->max = drift[RTW89_BCN_TRACK_STAT_NR - 1];
tmp = RTW89_BCN_TRACK_STAT_NR / 4;
q1 = ((drift[tmp] + drift[tmp - 1]) * RTW89_BCN_TRACK_SCALE_FACTOR) / 2;
tmp = (RTW89_BCN_TRACK_STAT_NR * 3) / 4;
q3 = ((drift[tmp] + drift[tmp - 1]) * RTW89_BCN_TRACK_SCALE_FACTOR) / 2;
iqr = q3 - q1;
tmp = (3 * iqr) / 2;
if (bcn_dist->min <= 5)
lower_bound = bcn_dist->min;
else if (q1 > tmp)
lower_bound = (q1 - tmp) / RTW89_BCN_TRACK_SCALE_FACTOR;
else
lower_bound = 0;
upper_bound = (q3 + tmp) / RTW89_BCN_TRACK_SCALE_FACTOR;
for (i = 0; i < RTW89_BCN_TRACK_STAT_NR; i++) {
u16 tbtt = bcn_stat->tbtt_tu[i];
u16 min = bcn_stat->tbtt_tu_min;
u8 bin_idx;
/* histogram */
bin_idx = min((tbtt - min) / RTW89_BCN_TRACK_BIN_WIDTH,
RTW89_BCN_TRACK_MAX_BIN_NUM - 1);
bins[bin_idx]++;
/* boxplot outlier */
if (drift[i] < lower_bound || drift[i] > upper_bound)
outlier_count++;
}
bcn_dist->outlier_count = outlier_count;
bcn_dist->lower_bound = lower_bound;
bcn_dist->upper_bound = upper_bound;
}
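Drift values carry one extra decimal digit via RTW89_BCN_TRACK_SCALE_FACTOR (10), so Q1/Q3 and the 1.5*IQR fences are computed in tenths of a TU and divided back down for the bounds. A standalone worked example with 32 sorted samples, mirroring the arithmetic above (uses GNU range initializers):

	#include <stdio.h>

	#define NR		32
	#define SCALE_FACTOR	10	/* RTW89_BCN_TRACK_SCALE_FACTOR */

	int main(void)
	{
		/* Sorted drift samples in TU; indexes 7/8 and 23/24 feed Q1/Q3. */
		unsigned int drift[NR] = {
			[0 ... 6] = 2, [7] = 4, [8] = 4, [9 ... 22] = 6,
			[23] = 8, [24] = 8, [25 ... 31] = 20,
		};
		unsigned int q1, q3, iqr, tmp, lower, upper;

		q1 = ((drift[NR / 4] + drift[NR / 4 - 1]) * SCALE_FACTOR) / 2;		/* 40 */
		q3 = ((drift[NR * 3 / 4] + drift[NR * 3 / 4 - 1]) * SCALE_FACTOR) / 2;	/* 80 */
		iqr = q3 - q1;								/* 40 */
		tmp = (3 * iqr) / 2;							/* 60 */

		lower = drift[0] <= 5 ? drift[0] :
			(q1 > tmp ? (q1 - tmp) / SCALE_FACTOR : 0);			/* 2 TU */
		upper = (q3 + tmp) / SCALE_FACTOR;					/* 14 TU */

		/* The seven samples at 20 TU land above the upper fence and
		 * would be counted as outliers.
		 */
		printf("lower=%u upper=%u\n", lower, upper);
		return 0;
	}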
static u8 rtw89_bcn_get_coverage(struct rtw89_dev *rtwdev, u16 threshold)
{
struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat;
int l = 0, r = RTW89_BCN_TRACK_STAT_NR - 1, m;
u16 *drift = bcn_stat->drift;
int index = -1;
u8 count = 0;
while (l <= r) {
m = l + (r - l) / 2;
if (drift[m] <= threshold) {
index = m;
l = m + 1;
} else {
r = m - 1;
}
}
count = (index == -1) ? 0 : (index + 1);
return (count * PERCENT) / RTW89_BCN_TRACK_STAT_NR;
}
static u16 rtw89_bcn_get_histogram_bound(struct rtw89_dev *rtwdev, u8 target)
{
struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat;
struct rtw89_beacon_dist *bcn_dist = &bcn_stat->bcn_dist;
u16 tbtt_tu_max = bcn_stat->tbtt_tu_max;
u16 upper, lower = bcn_stat->tbtt_tu_min;
u8 i, count = 0;
for (i = 0; i < RTW89_BCN_TRACK_MAX_BIN_NUM; i++) {
upper = lower + RTW89_BCN_TRACK_BIN_WIDTH - 1;
if (i == RTW89_BCN_TRACK_MAX_BIN_NUM - 1)
upper = max(upper, tbtt_tu_max);
count += bcn_dist->bins[i];
if (count > target)
break;
lower = upper + 1;
}
return upper;
}
static u16 rtw89_bcn_get_rx_time(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan)
{
#define RTW89_SYMBOL_TIME_2GHZ 192
#define RTW89_SYMBOL_TIME_5GHZ 20
#define RTW89_SYMBOL_TIME_6GHZ 20
struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat;
u16 bitrate, val;
if (!rtw89_legacy_rate_to_bitrate(rtwdev, pkt_stat->beacon_rate, &bitrate))
return 0;
val = (pkt_stat->beacon_len * 8 * RTW89_BCN_TRACK_SCALE_FACTOR) / bitrate;
switch (chan->band_type) {
default:
case RTW89_BAND_2G:
val += RTW89_SYMBOL_TIME_2GHZ;
break;
case RTW89_BAND_5G:
val += RTW89_SYMBOL_TIME_5GHZ;
break;
case RTW89_BAND_6G:
val += RTW89_SYMBOL_TIME_6GHZ;
break;
}
/* convert to millisecond */
return DIV_ROUND_UP(val, 1000);
}
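Since ieee80211_rate.bitrate is in units of 100 kbit/s, multiplying by RTW89_BCN_TRACK_SCALE_FACTOR before the division yields microseconds. A worked example, assuming a 320-byte beacon received at 6 Mbit/s on 5 GHz:

	#include <stdio.h>

	int main(void)
	{
		unsigned int beacon_len = 320;	/* bytes (assumed) */
		unsigned int bitrate = 60;	/* 6 Mbit/s in 100 kbit/s units */
		unsigned int val;

		val = (beacon_len * 8 * 10) / bitrate;	/* 426 us on air */
		val += 20;				/* RTW89_SYMBOL_TIME_5GHZ */

		/* DIV_ROUND_UP(446, 1000) -> 1 ms added to the beacon timeout. */
		printf("%u ms\n", (val + 999) / 1000);
		return 0;
	}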
static void rtw89_bcn_calc_timeout(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link)
{
#define RTW89_BCN_TRACK_EXTEND_TIMEOUT 5
#define RTW89_BCN_TRACK_COVERAGE_TH 0 /* unit: TU */
#define RTW89_BCN_TRACK_STRONG_RSSI 80
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat;
struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat;
struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
struct rtw89_beacon_dist *bcn_dist = &bcn_stat->bcn_dist;
u16 outlier_high_bcn_th = bcn_track->outlier_high_bcn_th;
u16 outlier_low_bcn_th = bcn_track->outlier_low_bcn_th;
u8 rssi = ewma_rssi_read(&rtwdev->phystat.bcn_rssi);
u16 target_bcn_th = bcn_track->target_bcn_th;
u16 low_bcn_th = bcn_track->low_bcn_th;
u16 med_bcn_th = bcn_track->med_bcn_th;
u16 beacon_int = bcn_track->beacon_int;
u16 bcn_timeout;
if (pkt_stat->beacon_nr < low_bcn_th) {
bcn_timeout = (RTW89_BCN_TRACK_TARGET_BCN * beacon_int) / PERCENT;
goto out;
}
if (bcn_dist->outlier_count >= outlier_high_bcn_th) {
bcn_timeout = bcn_dist->max;
goto out;
}
if (pkt_stat->beacon_nr < med_bcn_th) {
if (bcn_dist->outlier_count > outlier_low_bcn_th)
bcn_timeout = (bcn_dist->max + bcn_dist->upper_bound) / 2;
else
bcn_timeout = bcn_dist->upper_bound +
RTW89_BCN_TRACK_EXTEND_TIMEOUT;
goto out;
}
if (rssi >= RTW89_BCN_TRACK_STRONG_RSSI) {
if (rtw89_bcn_get_coverage(rtwdev, RTW89_BCN_TRACK_COVERAGE_TH) >= 90) {
/* ideal case */
bcn_timeout = 0;
} else {
u16 offset_tu = bcn_track->tbtt_offset / 1024;
u16 upper_bound;
upper_bound =
rtw89_bcn_get_histogram_bound(rtwdev, target_bcn_th);
bcn_timeout =
_rtw89_bcn_calc_drift(upper_bound, offset_tu, beacon_int);
}
goto out;
}
bcn_timeout = bcn_stat->drift[target_bcn_th];
out:
bcn_track->bcn_timeout = bcn_timeout + rtw89_bcn_get_rx_time(rtwdev, chan);
}
static void rtw89_bcn_update_timeout(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link)
{
rtw89_bcn_calc_drift(rtwdev);
rtw89_bcn_calc_distribution(rtwdev);
rtw89_bcn_calc_timeout(rtwdev, rtwvif_link);
}
static void rtw89_core_bcn_track(struct rtw89_dev *rtwdev)
{
struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
struct rtw89_vif_link *rtwvif_link;
struct rtw89_vif *rtwvif;
unsigned int link_id;
if (!RTW89_CHK_FW_FEATURE(BEACON_TRACKING, &rtwdev->fw))
return;
if (!rtwdev->lps_enabled)
return;
if (!bcn_track->is_data_ready)
return;
rtw89_for_each_rtwvif(rtwdev, rtwvif) {
rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
if (!(rtwvif_link->wifi_role == RTW89_WIFI_ROLE_STATION ||
rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT))
continue;
rtw89_bcn_update_tbtt_offset(rtwdev, rtwvif_link);
rtw89_bcn_update_timeout(rtwdev, rtwvif_link);
}
}
}
static bool rtw89_core_bcn_track_can_lps(struct rtw89_dev *rtwdev)
{
struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
if (!RTW89_CHK_FW_FEATURE(BEACON_TRACKING, &rtwdev->fw))
return true;
return bcn_track->is_data_ready;
}
static void rtw89_core_bcn_track_assoc(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link)
{
#define RTW89_BCN_TRACK_MED_BCN 70
#define RTW89_BCN_TRACK_LOW_BCN 30
#define RTW89_BCN_TRACK_OUTLIER_HIGH_BCN 30
#define RTW89_BCN_TRACK_OUTLIER_LOW_BCN 20
struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
u32 period = jiffies_to_msecs(RTW89_TRACK_WORK_PERIOD);
struct ieee80211_bss_conf *bss_conf;
u32 beacons_in_period;
u32 bcn_intvl_us;
u16 beacon_int;
u8 dtim;
rcu_read_lock();
bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
beacon_int = bss_conf->beacon_int;
dtim = bss_conf->dtim_period;
rcu_read_unlock();
beacons_in_period = period / beacon_int / dtim;
bcn_intvl_us = ieee80211_tu_to_usec(beacon_int);
bcn_track->low_bcn_th =
(beacons_in_period * RTW89_BCN_TRACK_LOW_BCN) / PERCENT;
bcn_track->med_bcn_th =
(beacons_in_period * RTW89_BCN_TRACK_MED_BCN) / PERCENT;
bcn_track->outlier_low_bcn_th =
(RTW89_BCN_TRACK_STAT_NR * RTW89_BCN_TRACK_OUTLIER_LOW_BCN) / PERCENT;
bcn_track->outlier_high_bcn_th =
(RTW89_BCN_TRACK_STAT_NR * RTW89_BCN_TRACK_OUTLIER_HIGH_BCN) / PERCENT;
bcn_track->target_bcn_th =
(RTW89_BCN_TRACK_STAT_NR * RTW89_BCN_TRACK_TARGET_BCN) / PERCENT;
bcn_track->close_bcn_intvl_th = ieee80211_tu_to_usec(beacon_int - 3);
bcn_track->tbtt_diff_th = (bcn_intvl_us * 85) / PERCENT;
bcn_track->beacon_int = beacon_int;
bcn_track->dtim = dtim;
}
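The thresholds are percentages of how many DTIM beacons are expected within one track-work period. A worked example, assuming a 2-second period (the period constant itself is not shown in this diff), beacon_int = 100 TU and dtim = 1:

	#include <stdio.h>

	#define PERCENT			100
	#define RTW89_BCN_TRACK_STAT_NR	32

	int main(void)
	{
		unsigned int period_ms = 2000;	/* assumed track-work period */
		unsigned int beacon_int = 100;	/* TU, treated as roughly 1 ms */
		unsigned int dtim = 1;
		unsigned int beacons_in_period = period_ms / beacon_int / dtim;	/* 20 */

		printf("low=%u med=%u target=%u\n",
		       (beacons_in_period * 30) / PERCENT,		/* low_bcn_th = 6 */
		       (beacons_in_period * 70) / PERCENT,		/* med_bcn_th = 14 */
		       (RTW89_BCN_TRACK_STAT_NR * 80) / PERCENT);	/* target_bcn_th = 25 */
		return 0;
	}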
static void rtw89_core_bcn_track_reset(struct rtw89_dev *rtwdev)
{
memset(&rtwdev->phystat.bcn_stat, 0, sizeof(rtwdev->phystat.bcn_stat));
memset(&rtwdev->bcn_track, 0, sizeof(rtwdev->bcn_track));
}
static void rtw89_vif_rx_bcn_stat(struct rtw89_dev *rtwdev,
struct ieee80211_bss_conf *bss_conf,
struct sk_buff *skb)
{
#define RTW89_APPEND_TSF_2GHZ 384
#define RTW89_APPEND_TSF_5GHZ 52
#define RTW89_APPEND_TSF_6GHZ 52
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat;
struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
u32 bcn_intvl_us = ieee80211_tu_to_usec(bss_conf->beacon_int);
u64 tsf = le64_to_cpu(mgmt->u.beacon.timestamp);
u8 wp, num = bcn_stat->num;
u16 append;
if (!RTW89_CHK_FW_FEATURE(BEACON_TRACKING, &rtwdev->fw))
return;
switch (rx_status->band) {
default:
case NL80211_BAND_2GHZ:
append = RTW89_APPEND_TSF_2GHZ;
break;
case NL80211_BAND_5GHZ:
append = RTW89_APPEND_TSF_5GHZ;
break;
case NL80211_BAND_6GHZ:
append = RTW89_APPEND_TSF_6GHZ;
break;
}
wp = bcn_stat->wp;
div_u64_rem(tsf - append, bcn_intvl_us, &bcn_stat->tbtt_us[wp]);
bcn_stat->tbtt_tu[wp] = bcn_stat->tbtt_us[wp] / 1024;
bcn_stat->wp = (wp + 1) % RTW89_BCN_TRACK_STAT_NR;
bcn_stat->num = umin(num + 1, RTW89_BCN_TRACK_STAT_NR);
bcn_track->is_data_ready = bcn_stat->num == RTW89_BCN_TRACK_STAT_NR;
}
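The append constants approximate the airtime from the start of the beacon to its timestamp field at the band's base rate (preamble plus MAC header: roughly 384 us at 1 Mbit/s, 52 us for OFDM), so subtracting them before the modulo recovers the TBTT. A small worked example, assuming a 100 TU beacon interval:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t tsf = 1024384;		/* beacon timestamp in us (assumed) */
		uint32_t bcn_intvl_us = 102400;	/* 100 TU */
		uint32_t append = 384;		/* RTW89_APPEND_TSF_2GHZ */
		uint32_t tbtt_us, tbtt_tu;

		tbtt_us = (uint32_t)((tsf - append) % bcn_intvl_us);	/* 0 */
		tbtt_tu = tbtt_us / 1024;				/* 0 */

		/* A beacon landing 2 TU late (tsf = 1026432) would instead give
		 * tbtt_us = 2048 and tbtt_tu = 2.
		 */
		printf("tbtt_us=%u tbtt_tu=%u\n", tbtt_us, tbtt_tu);
		return 0;
	}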
static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
@ -2237,6 +2741,7 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
struct ieee80211_bss_conf *bss_conf;
struct rtw89_vif_link *rtwvif_link;
const u8 *bssid = iter_data->bssid;
const u8 *target_bssid;
if (rtwdev->scanning &&
(ieee80211_is_beacon(hdr->frame_control) ||
@ -2258,7 +2763,10 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
goto out;
}
if (!ether_addr_equal(bss_conf->bssid, bssid))
target_bssid = ieee80211_is_beacon(hdr->frame_control) &&
bss_conf->nontransmitted ?
bss_conf->transmitter_bssid : bss_conf->bssid;
if (!ether_addr_equal(target_bssid, bssid))
goto out;
if (is_mld) {
@ -2272,7 +2780,6 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
rtw89_vif_sync_bcn_tsf(rtwvif_link, hdr, skb->len);
rtw89_fw_h2c_rssi_offload(rtwdev, phy_ppdu);
}
pkt_stat->beacon_nr++;
if (phy_ppdu) {
ewma_rssi_add(&rtwdev->phystat.bcn_rssi, phy_ppdu->rssi_avg);
@ -2280,7 +2787,11 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
rtwvif_link->bcn_bw_idx = phy_ppdu->bw_idx;
}
pkt_stat->beacon_nr++;
pkt_stat->beacon_rate = desc_info->data_rate;
pkt_stat->beacon_len = skb->len;
rtw89_vif_rx_bcn_stat(rtwdev, bss_conf, skb);
}
if (!ether_addr_equal(bss_conf->addr, hdr->addr1))
@ -3226,7 +3737,7 @@ static u32 rtw89_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev, u8 tid)
u8 qsel, ch_dma;
qsel = rtw89_core_get_qsel(rtwdev, tid);
ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel);
ch_dma = rtw89_chip_get_ch_dma(rtwdev, qsel);
return rtw89_hci_check_and_reclaim_tx_resource(rtwdev, ch_dma);
}
@ -3411,6 +3922,7 @@ int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rt
struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
int link_id = ieee80211_vif_is_mld(vif) ? rtwvif_link->link_id : -1;
struct rtw89_sta_link *rtwsta_link;
struct rtw89_tx_wait_info *wait;
struct ieee80211_sta *sta;
struct ieee80211_hdr *hdr;
struct rtw89_sta *rtwsta;
@ -3420,6 +3932,12 @@ int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rt
if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc)
return 0;
wait = kzalloc(sizeof(*wait), GFP_KERNEL);
if (!wait)
return -ENOMEM;
init_completion(&wait->completion);
rcu_read_lock();
sta = ieee80211_find_sta(vif, vif->cfg.ap_addr);
if (!sta) {
@ -3434,6 +3952,8 @@ int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rt
goto out;
}
wait->skb = skb;
hdr = (struct ieee80211_hdr *)skb->data;
if (ps)
hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
@ -3441,10 +3961,12 @@ int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rt
rtwsta_link = rtwsta->links[rtwvif_link->link_id];
if (unlikely(!rtwsta_link)) {
ret = -ENOLINK;
dev_kfree_skb_any(skb);
goto out;
}
ret = rtw89_core_tx_write_link(rtwdev, rtwvif_link, rtwsta_link, skb, &qsel, true);
ret = rtw89_core_tx_write_link(rtwdev, rtwvif_link, rtwsta_link, skb, &qsel, true,
wait);
if (ret) {
rtw89_warn(rtwdev, "nullfunc transmit failed: %d\n", ret);
dev_kfree_skb_any(skb);
@ -3453,10 +3975,11 @@ int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rt
rcu_read_unlock();
return rtw89_core_tx_kick_off_and_wait(rtwdev, skb, qsel,
return rtw89_core_tx_kick_off_and_wait(rtwdev, skb, wait, qsel,
timeout);
out:
rcu_read_unlock();
kfree(wait);
return ret;
}
@ -3697,6 +4220,9 @@ static void rtw89_enter_lps_track(struct rtw89_dev *rtwdev)
vif->type == NL80211_IFTYPE_P2P_CLIENT))
continue;
if (!rtw89_core_bcn_track_can_lps(rtwdev))
continue;
rtw89_enter_lps(rtwdev, rtwvif, true);
}
}
@ -3883,6 +4409,7 @@ static void rtw89_track_work(struct wiphy *wiphy, struct wiphy_work *work)
rtw89_btc_ntfy_wl_sta(rtwdev);
}
rtw89_mac_bf_monitor_track(rtwdev);
rtw89_core_bcn_track(rtwdev);
rtw89_phy_stat_track(rtwdev);
rtw89_phy_env_monitor_track(rtwdev);
rtw89_phy_dig(rtwdev);
@ -4129,8 +4656,10 @@ int rtw89_core_sta_link_disassoc(struct rtw89_dev *rtwdev,
rtw89_assoc_link_clr(rtwsta_link);
if (vif->type == NL80211_IFTYPE_STATION)
if (vif->type == NL80211_IFTYPE_STATION) {
rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, rtwvif_link, false);
rtw89_core_bcn_track_reset(rtwdev);
}
if (rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT)
rtw89_p2p_noa_once_deinit(rtwvif_link);
@ -4271,6 +4800,7 @@ int rtw89_core_sta_link_assoc(struct rtw89_dev *rtwdev,
BTC_ROLE_MSTS_STA_CONN_END);
rtw89_core_get_no_ul_ofdma_htc(rtwdev, &rtwsta_link->htc_template, chan);
rtw89_phy_ul_tb_assoc(rtwdev, rtwvif_link);
rtw89_core_bcn_track_assoc(rtwdev, rtwvif_link);
ret = rtw89_fw_h2c_general_pkt(rtwdev, rtwvif_link, rtwsta_link->mac_id);
if (ret) {
@ -4829,39 +5359,96 @@ void rtw89_core_csa_beacon_work(struct wiphy *wiphy, struct wiphy_work *work)
}
}
int rtw89_wait_for_cond(struct rtw89_wait_info *wait, unsigned int cond)
struct rtw89_wait_response *
rtw89_wait_for_cond_prep(struct rtw89_wait_info *wait, unsigned int cond)
{
struct completion *cmpl = &wait->completion;
unsigned long time_left;
struct rtw89_wait_response *prep;
unsigned int cur;
/* use -EPERM _iff_ telling eval side not to make any changes */
cur = atomic_cmpxchg(&wait->cond, RTW89_WAIT_COND_IDLE, cond);
if (cur != RTW89_WAIT_COND_IDLE)
return -EBUSY;
return ERR_PTR(-EPERM);
time_left = wait_for_completion_timeout(cmpl, RTW89_WAIT_FOR_COND_TIMEOUT);
if (time_left == 0) {
atomic_set(&wait->cond, RTW89_WAIT_COND_IDLE);
return -ETIMEDOUT;
prep = kzalloc(sizeof(*prep), GFP_KERNEL);
if (!prep)
return ERR_PTR(-ENOMEM);
init_completion(&prep->completion);
rcu_assign_pointer(wait->resp, prep);
return prep;
}
int rtw89_wait_for_cond_eval(struct rtw89_wait_info *wait,
struct rtw89_wait_response *prep, int err)
{
unsigned long time_left;
if (IS_ERR(prep)) {
err = err ?: PTR_ERR(prep);
/* special error case: no permission to reset anything */
if (PTR_ERR(prep) == -EPERM)
return err;
goto reset;
}
if (err)
goto cleanup;
time_left = wait_for_completion_timeout(&prep->completion,
RTW89_WAIT_FOR_COND_TIMEOUT);
if (time_left == 0) {
err = -ETIMEDOUT;
goto cleanup;
}
wait->data = prep->data;
cleanup:
rcu_assign_pointer(wait->resp, NULL);
kfree_rcu(prep, rcu_head);
reset:
atomic_set(&wait->cond, RTW89_WAIT_COND_IDLE);
if (err)
return err;
if (wait->data.err)
return -EFAULT;
return 0;
}
static void rtw89_complete_cond_resp(struct rtw89_wait_response *resp,
const struct rtw89_completion_data *data)
{
resp->data = *data;
complete(&resp->completion);
}
void rtw89_complete_cond(struct rtw89_wait_info *wait, unsigned int cond,
const struct rtw89_completion_data *data)
{
struct rtw89_wait_response *resp;
unsigned int cur;
guard(rcu)();
resp = rcu_dereference(wait->resp);
if (!resp)
return;
cur = atomic_cmpxchg(&wait->cond, cond, RTW89_WAIT_COND_IDLE);
if (cur != cond)
return;
wait->data = *data;
complete(&wait->completion);
rtw89_complete_cond_resp(resp, data);
}
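The split replaces the old single rtw89_wait_for_cond(): a sender reserves the condition and allocates the response before transmitting, then evaluates afterwards; rtw89_h2c_tx_and_wait() in the fw.c hunks below is the caller updated by this commit. Roughly:

	struct rtw89_wait_response *prep;
	int ret = 0;

	prep = rtw89_wait_for_cond_prep(wait, cond);
	if (IS_ERR(prep))		/* e.g. ERR_PTR(-EPERM) when the wait is busy */
		goto out;

	ret = send_the_request(rtwdev);	/* placeholder for the actual H2C tx */

out:
	/* Waits for rtw89_complete_cond(), or cleans up on error, and resets
	 * wait->cond to IDLE (except for the -EPERM case).
	 */
	return rtw89_wait_for_cond_eval(wait, prep, ret);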
void rtw89_core_ntfy_btc_event(struct rtw89_dev *rtwdev, enum rtw89_btc_hmsg event)
@ -4908,6 +5495,8 @@ int rtw89_core_start(struct rtw89_dev *rtwdev)
{
int ret;
rtw89_phy_init_bb_afe(rtwdev);
ret = rtw89_mac_init(rtwdev);
if (ret) {
rtw89_err(rtwdev, "mac init fail, ret:%d\n", ret);
@ -4952,6 +5541,7 @@ int rtw89_core_start(struct rtw89_dev *rtwdev)
rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_WL_ON);
rtw89_fw_h2c_fw_log(rtwdev, rtwdev->fw.log.enable);
rtw89_fw_h2c_init_ba_cam(rtwdev);
rtw89_tas_fw_timer_enable(rtwdev, true);
return 0;
}
@ -4967,6 +5557,7 @@ void rtw89_core_stop(struct rtw89_dev *rtwdev)
if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
return;
rtw89_tas_fw_timer_enable(rtwdev, false);
rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_WL_OFF);
clear_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
@ -4978,6 +5569,7 @@ void rtw89_core_stop(struct rtw89_dev *rtwdev)
wiphy_work_cancel(wiphy, &btc->dhcp_notify_work);
wiphy_work_cancel(wiphy, &btc->icmp_notify_work);
cancel_delayed_work_sync(&rtwdev->txq_reinvoke_work);
wiphy_delayed_work_cancel(wiphy, &rtwdev->tx_wait_work);
wiphy_delayed_work_cancel(wiphy, &rtwdev->track_work);
wiphy_delayed_work_cancel(wiphy, &rtwdev->track_ps_work);
wiphy_delayed_work_cancel(wiphy, &rtwdev->chanctx_work);
@ -5203,6 +5795,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
INIT_LIST_HEAD(&rtwdev->scan_info.pkt_list[band]);
}
INIT_LIST_HEAD(&rtwdev->scan_info.chan_list);
INIT_LIST_HEAD(&rtwdev->tx_waits);
INIT_WORK(&rtwdev->ba_work, rtw89_core_ba_work);
INIT_WORK(&rtwdev->txq_work, rtw89_core_txq_work);
INIT_DELAYED_WORK(&rtwdev->txq_reinvoke_work, rtw89_core_txq_reinvoke_work);
@ -5214,6 +5807,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
wiphy_delayed_work_init(&rtwdev->coex_rfk_chk_work, rtw89_coex_rfk_chk_work);
wiphy_delayed_work_init(&rtwdev->cfo_track_work, rtw89_phy_cfo_track_work);
wiphy_delayed_work_init(&rtwdev->mcc_prepare_done_work, rtw89_mcc_prepare_done_work);
wiphy_delayed_work_init(&rtwdev->tx_wait_work, rtw89_tx_wait_work);
INIT_DELAYED_WORK(&rtwdev->forbid_ba_work, rtw89_forbid_ba_work);
wiphy_delayed_work_init(&rtwdev->antdiv_work, rtw89_phy_antdiv_work);
rtwdev->txq_wq = alloc_workqueue("rtw89_tx_wq", WQ_UNBOUND | WQ_HIGHPRI, 0);
@ -5813,6 +6407,7 @@ int rtw89_core_register(struct rtw89_dev *rtwdev)
return ret;
}
rtw89_phy_dm_init_data(rtwdev);
rtw89_debugfs_init(rtwdev);
return 0;
@ -5863,6 +6458,9 @@ struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device,
ops->cancel_remain_on_channel = NULL;
}
if (!chip->support_noise)
ops->get_survey = NULL;
driver_data_size = sizeof(struct rtw89_dev) + bus_data_size;
hw = ieee80211_alloc_hw(driver_data_size, ops);
if (!hw)

core.h (150 changed lines)

@ -1011,6 +1011,7 @@ struct rtw89_port_reg {
u32 ptcl_dbg;
u32 ptcl_dbg_info;
u32 bcn_drop_all;
u32 bcn_psr_rpt;
u32 hiq_win[RTW89_PORT_NUM];
};
@ -3506,9 +3507,12 @@ struct rtw89_phy_rate_pattern {
bool enable;
};
#define RTW89_TX_WAIT_WORK_TIMEOUT msecs_to_jiffies(500)
struct rtw89_tx_wait_info {
struct rcu_head rcu_head;
struct list_head list;
struct completion completion;
struct sk_buff *skb;
bool tx_done;
};
@ -3759,6 +3763,7 @@ struct rtw89_chip_ops {
void (*fill_txdesc_fwcmd)(struct rtw89_dev *rtwdev,
struct rtw89_tx_desc_info *desc_info,
void *txdesc);
u8 (*get_ch_dma)(struct rtw89_dev *rtwdev, u8 qsel);
int (*cfg_ctrl_path)(struct rtw89_dev *rtwdev, bool wl);
int (*mac_cfg_gnt)(struct rtw89_dev *rtwdev,
const struct rtw89_mac_ax_coex_gnt *gnt_cfg);
@ -4363,6 +4368,9 @@ struct rtw89_chanctx_listener {
(struct rtw89_dev *rtwdev, enum rtw89_chanctx_state state);
};
#define RTW89_NHM_TH_NUM 11
#define RTW89_NHM_RPT_NUM 12
struct rtw89_chip_info {
enum rtw89_core_chip_id chip_id;
enum rtw89_chip_gen chip_gen;
@ -4397,6 +4405,7 @@ struct rtw89_chip_info {
bool support_ant_gain;
bool support_tas;
bool support_sar_by_ant;
bool support_noise;
bool ul_tb_waveform_ctrl;
bool ul_tb_pwr_diff;
bool rx_freq_frome_ie;
@ -4481,6 +4490,8 @@ struct rtw89_chip_info {
bool cfo_hw_comp;
const struct rtw89_reg_def *dcfo_comp;
u8 dcfo_comp_sft;
const struct rtw89_reg_def (*nhm_report)[RTW89_NHM_RPT_NUM];
const struct rtw89_reg_def (*nhm_th)[RTW89_NHM_TH_NUM];
const struct rtw89_imr_info *imr_info;
const struct rtw89_imr_table *imr_dmac_table;
const struct rtw89_imr_table *imr_cmac_table;
@ -4542,17 +4553,23 @@ struct rtw89_completion_data {
u8 buf[RTW89_COMPLETION_BUF_SIZE];
};
struct rtw89_wait_info {
atomic_t cond;
struct rtw89_wait_response {
struct rcu_head rcu_head;
struct completion completion;
struct rtw89_completion_data data;
};
struct rtw89_wait_info {
atomic_t cond;
struct rtw89_completion_data data;
struct rtw89_wait_response __rcu *resp;
};
#define RTW89_WAIT_FOR_COND_TIMEOUT msecs_to_jiffies(100)
static inline void rtw89_init_wait(struct rtw89_wait_info *wait)
{
init_completion(&wait->completion);
rcu_assign_pointer(wait->resp, NULL);
atomic_set(&wait->cond, RTW89_WAIT_COND_IDLE);
}
@ -4622,6 +4639,7 @@ enum rtw89_fw_feature {
RTW89_FW_FEATURE_SCAN_OFFLOAD_EXTRA_OP,
RTW89_FW_FEATURE_RFK_NTFY_MCC_V0,
RTW89_FW_FEATURE_LPS_DACK_BY_C2H_REG,
RTW89_FW_FEATURE_BEACON_TRACKING,
};
struct rtw89_fw_suit {
@ -4681,6 +4699,7 @@ struct rtw89_fw_elm_info {
struct rtw89_fw_txpwr_track_cfg *txpwr_trk;
struct rtw89_phy_rfk_log_fmt *rfk_log_fmt;
const struct rtw89_regd_data *regd;
const struct rtw89_fw_element_hdr *afe;
};
enum rtw89_fw_mss_dev_type {
@ -5074,9 +5093,36 @@ struct rtw89_pkt_drop_params {
struct rtw89_pkt_stat {
u16 beacon_nr;
u8 beacon_rate;
u32 beacon_len;
u32 rx_rate_cnt[RTW89_HW_RATE_NR];
};
#define RTW89_BCN_TRACK_STAT_NR 32
#define RTW89_BCN_TRACK_SCALE_FACTOR 10
#define RTW89_BCN_TRACK_MAX_BIN_NUM 6
#define RTW89_BCN_TRACK_BIN_WIDTH 5
#define RTW89_BCN_TRACK_TARGET_BCN 80
struct rtw89_beacon_dist {
u16 min;
u16 max;
u16 outlier_count;
u16 lower_bound;
u16 upper_bound;
u16 bins[RTW89_BCN_TRACK_MAX_BIN_NUM];
};
struct rtw89_beacon_stat {
u8 num;
u8 wp;
u16 tbtt_tu_min;
u16 tbtt_tu_max;
u16 drift[RTW89_BCN_TRACK_STAT_NR];
u32 tbtt_us[RTW89_BCN_TRACK_STAT_NR];
u16 tbtt_tu[RTW89_BCN_TRACK_STAT_NR];
struct rtw89_beacon_dist bcn_dist;
};
DECLARE_EWMA(thermal, 4, 4);
struct rtw89_phy_stat {
@ -5085,6 +5131,7 @@ struct rtw89_phy_stat {
struct ewma_rssi bcn_rssi;
struct rtw89_pkt_stat cur_pkt_stat;
struct rtw89_pkt_stat last_pkt_stat;
struct rtw89_beacon_stat bcn_stat;
};
enum rtw89_rfk_report_state {
@ -5434,6 +5481,7 @@ enum rtw89_env_racing_lv {
struct rtw89_ccx_para_info {
enum rtw89_env_racing_lv rac_lv;
u16 mntr_time;
bool nhm_incld_cca;
u8 nhm_manual_th_ofst;
u8 nhm_manual_th0;
enum rtw89_ifs_clm_application ifs_clm_app;
@ -5467,9 +5515,13 @@ enum rtw89_ccx_edcca_opt_bw_idx {
RTW89_CCX_EDCCA_BW20_7 = 7
};
#define RTW89_NHM_TH_NUM 11
struct rtw89_nhm_report {
struct list_head list;
struct ieee80211_channel *channel;
u8 noise;
};
#define RTW89_FAHM_TH_NUM 11
#define RTW89_NHM_RPT_NUM 12
#define RTW89_FAHM_RPT_NUM 12
#define RTW89_IFS_CLM_NUM 4
struct rtw89_env_monitor_info {
@ -5503,6 +5555,13 @@ struct rtw89_env_monitor_info {
u16 ifs_clm_ofdm_fa_permil;
u32 ifs_clm_ifs_avg[RTW89_IFS_CLM_NUM];
u32 ifs_clm_cca_avg[RTW89_IFS_CLM_NUM];
bool nhm_include_cca;
u32 nhm_sum;
u32 nhm_mntr_time;
u16 nhm_result[RTW89_NHM_RPT_NUM];
u8 nhm_th[RTW89_NHM_RPT_NUM];
struct rtw89_nhm_report *nhm_his[RTW89_BAND_NUM];
struct list_head nhm_rpt_list;
};
enum rtw89_ser_rcvy_step {
@ -5715,8 +5774,8 @@ struct rtw89_wow_gtk_info {
u8 kck[32];
u8 kek[32];
u8 tk1[16];
u8 txmickey[8];
u8 rxmickey[8];
u8 txmickey[8];
__le32 igtk_keyid;
__le64 ipn;
u8 igtk[2][32];
@ -5882,6 +5941,24 @@ struct rtw89_mlo_info {
struct rtw89_wait_info wait;
};
struct rtw89_beacon_track_info {
bool is_data_ready;
u32 tbtt_offset; /* in unit of microsecond */
u16 bcn_timeout; /* in unit of millisecond */
/* The following are constant and set at association. */
u8 dtim;
u16 beacon_int;
u16 low_bcn_th;
u16 med_bcn_th;
u16 high_bcn_th;
u16 target_bcn_th;
u16 outlier_low_bcn_th;
u16 outlier_high_bcn_th;
u32 close_bcn_intvl_th;
u32 tbtt_diff_th;
};
struct rtw89_dev {
struct ieee80211_hw *hw;
struct device *dev;
@ -5896,6 +5973,7 @@ struct rtw89_dev {
const struct rtw89_pci_info *pci_info;
const struct rtw89_rfe_parms *rfe_parms;
struct rtw89_hal hal;
struct rtw89_beacon_track_info bcn_track;
struct rtw89_mcc_info mcc;
struct rtw89_mlo_info mlo;
struct rtw89_mac_info mac;
@ -5925,6 +6003,9 @@ struct rtw89_dev {
/* used to protect rpwm */
spinlock_t rpwm_lock;
struct list_head tx_waits;
struct wiphy_delayed_work tx_wait_work;
struct rtw89_cam_info cam_info;
struct sk_buff_head c2h_queue;
@ -6181,6 +6262,26 @@ rtw89_assoc_link_rcu_dereference(struct rtw89_dev *rtwdev, u8 macid)
list_first_entry_or_null(&p->dlink_pool, typeof(*p->links_inst), dlink_schd); \
})
static inline void rtw89_tx_wait_release(struct rtw89_tx_wait_info *wait)
{
dev_kfree_skb_any(wait->skb);
kfree_rcu(wait, rcu_head);
}
static inline void rtw89_tx_wait_list_clear(struct rtw89_dev *rtwdev)
{
struct rtw89_tx_wait_info *wait, *tmp;
lockdep_assert_wiphy(rtwdev->hw->wiphy);
list_for_each_entry_safe(wait, tmp, &rtwdev->tx_waits, list) {
if (!completion_done(&wait->completion))
continue;
list_del(&wait->list);
rtw89_tx_wait_release(wait);
}
}
static inline int rtw89_hci_tx_write(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
{
@ -6190,6 +6291,7 @@ static inline int rtw89_hci_tx_write(struct rtw89_dev *rtwdev,
static inline void rtw89_hci_reset(struct rtw89_dev *rtwdev)
{
rtwdev->hci.ops->reset(rtwdev);
rtw89_tx_wait_list_clear(rtwdev);
}
static inline int rtw89_hci_start(struct rtw89_dev *rtwdev)
@ -6322,9 +6424,13 @@ static inline void rtw89_hci_clear(struct rtw89_dev *rtwdev, struct pci_dev *pde
static inline
struct rtw89_tx_skb_data *RTW89_TX_SKB_CB(struct sk_buff *skb)
{
/*
* This should be used by/after rtw89_hci_tx_write() and before doing
* ieee80211_tx_info_clear_status().
*/
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
return (struct rtw89_tx_skb_data *)info->status.status_driver_data;
return (struct rtw89_tx_skb_data *)info->driver_data;
}
static inline u8 rtw89_read8(struct rtw89_dev *rtwdev, u32 addr)
@ -7130,6 +7236,14 @@ void rtw89_chip_fill_txdesc_fwcmd(struct rtw89_dev *rtwdev,
chip->ops->fill_txdesc_fwcmd(rtwdev, desc_info, txdesc);
}
static inline
u8 rtw89_chip_get_ch_dma(struct rtw89_dev *rtwdev, u8 qsel)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
return chip->ops->get_ch_dma(rtwdev, qsel);
}
static inline
void rtw89_chip_mac_cfg_gnt(struct rtw89_dev *rtwdev,
const struct rtw89_mac_ax_coex_gnt *gnt_cfg)
@ -7258,11 +7372,12 @@ static inline struct sk_buff *rtw89_alloc_skb_for_rx(struct rtw89_dev *rtwdev,
return dev_alloc_skb(length);
}
static inline void rtw89_core_tx_wait_complete(struct rtw89_dev *rtwdev,
static inline bool rtw89_core_tx_wait_complete(struct rtw89_dev *rtwdev,
struct rtw89_tx_skb_data *skb_data,
bool tx_done)
{
struct rtw89_tx_wait_info *wait;
bool ret = false;
rcu_read_lock();
@ -7270,11 +7385,14 @@ static inline void rtw89_core_tx_wait_complete(struct rtw89_dev *rtwdev,
if (!wait)
goto out;
ret = true;
wait->tx_done = tx_done;
complete(&wait->completion);
/* Don't access skb anymore after completion */
complete_all(&wait->completion);
out:
rcu_read_unlock();
return ret;
}
static inline bool rtw89_is_mlo_1_1(struct rtw89_dev *rtwdev)
@ -7358,7 +7476,8 @@ int rtw89_h2c_tx(struct rtw89_dev *rtwdev,
struct sk_buff *skb, bool fwdl);
void rtw89_core_tx_kick_off(struct rtw89_dev *rtwdev, u8 qsel);
int rtw89_core_tx_kick_off_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
int qsel, unsigned int timeout);
struct rtw89_tx_wait_info *wait, int qsel,
unsigned int timeout);
void rtw89_core_fill_txdesc(struct rtw89_dev *rtwdev,
struct rtw89_tx_desc_info *desc_info,
void *txdesc);
@ -7374,6 +7493,8 @@ void rtw89_core_fill_txdesc_fwcmd_v1(struct rtw89_dev *rtwdev,
void rtw89_core_fill_txdesc_fwcmd_v2(struct rtw89_dev *rtwdev,
struct rtw89_tx_desc_info *desc_info,
void *txdesc);
u8 rtw89_core_get_ch_dma(struct rtw89_dev *rtwdev, u8 qsel);
u8 rtw89_core_get_ch_dma_v1(struct rtw89_dev *rtwdev, u8 qsel);
void rtw89_core_rx(struct rtw89_dev *rtwdev,
struct rtw89_rx_desc_info *desc_info,
struct sk_buff *skb);
@ -7454,13 +7575,18 @@ void rtw89_vif_type_mapping(struct rtw89_vif_link *rtwvif_link, bool assoc);
int rtw89_chip_info_setup(struct rtw89_dev *rtwdev);
void rtw89_chip_cfg_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link);
bool rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate, u16 *bitrate);
bool rtw89_legacy_rate_to_bitrate(struct rtw89_dev *rtwdev, u8 legacy_rate, u16 *bitrate);
int rtw89_regd_setup(struct rtw89_dev *rtwdev);
int rtw89_regd_init_hint(struct rtw89_dev *rtwdev);
const char *rtw89_regd_get_string(enum rtw89_regulation_type regd);
void rtw89_traffic_stats_init(struct rtw89_dev *rtwdev,
struct rtw89_traffic_stats *stats);
int rtw89_wait_for_cond(struct rtw89_wait_info *wait, unsigned int cond);
struct rtw89_wait_response *
rtw89_wait_for_cond_prep(struct rtw89_wait_info *wait, unsigned int cond)
__acquires(rtw89_wait);
int rtw89_wait_for_cond_eval(struct rtw89_wait_info *wait,
struct rtw89_wait_response *prep, int err)
__releases(rtw89_wait);
void rtw89_complete_cond(struct rtw89_wait_info *wait, unsigned int cond,
const struct rtw89_completion_data *data);
int rtw89_core_start(struct rtw89_dev *rtwdev);

debug.c (125 changed lines)

@ -86,6 +86,7 @@ struct rtw89_debugfs {
struct rtw89_debugfs_priv stations;
struct rtw89_debugfs_priv disable_dm;
struct rtw89_debugfs_priv mlo_mode;
struct rtw89_debugfs_priv beacon_info;
};
struct rtw89_debugfs_iter_data {
@ -3562,6 +3563,58 @@ static int rtw89_dbg_trigger_ctrl_error(struct rtw89_dev *rtwdev)
return 0;
}
static int rtw89_dbg_trigger_mac_error_ax(struct rtw89_dev *rtwdev)
{
u16 val16;
u8 val8;
int ret;
ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_CMAC_SEL);
if (ret)
return ret;
val8 = rtw89_read8(rtwdev, R_AX_CMAC_FUNC_EN);
rtw89_write8(rtwdev, R_AX_CMAC_FUNC_EN, val8 & ~B_AX_TMAC_EN);
mdelay(1);
rtw89_write8(rtwdev, R_AX_CMAC_FUNC_EN, val8);
val16 = rtw89_read16(rtwdev, R_AX_PTCL_IMR0);
rtw89_write16(rtwdev, R_AX_PTCL_IMR0, val16 | B_AX_F2PCMD_EMPTY_ERR_INT_EN);
rtw89_write16(rtwdev, R_AX_PTCL_IMR0, val16);
return 0;
}
static int rtw89_dbg_trigger_mac_error_be(struct rtw89_dev *rtwdev)
{
int ret;
ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_CMAC_SEL);
if (ret)
return ret;
rtw89_write32_set(rtwdev, R_BE_CMAC_FW_TRIGGER_IDCT_ISR,
B_BE_CMAC_FW_TRIG_IDCT | B_BE_CMAC_FW_ERR_IDCT_IMR);
return 0;
}
static int rtw89_dbg_trigger_mac_error(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
rtw89_leave_ps_mode(rtwdev);
switch (chip->chip_gen) {
case RTW89_CHIP_AX:
return rtw89_dbg_trigger_mac_error_ax(rtwdev);
case RTW89_CHIP_BE:
return rtw89_dbg_trigger_mac_error_be(rtwdev);
default:
return -EOPNOTSUPP;
}
}
static ssize_t
rtw89_debug_priv_fw_crash_get(struct rtw89_dev *rtwdev,
struct rtw89_debugfs_priv *debugfs_priv,
@ -3577,6 +3630,7 @@ rtw89_debug_priv_fw_crash_get(struct rtw89_dev *rtwdev,
enum rtw89_dbg_crash_simulation_type {
RTW89_DBG_SIM_CPU_EXCEPTION = 1,
RTW89_DBG_SIM_CTRL_ERROR = 2,
RTW89_DBG_SIM_MAC_ERROR = 3,
};
static ssize_t
@ -3585,6 +3639,7 @@ rtw89_debug_priv_fw_crash_set(struct rtw89_dev *rtwdev,
const char *buf, size_t count)
{
int (*sim)(struct rtw89_dev *rtwdev);
bool announce = true;
u8 crash_type;
int ret;
@ -3603,11 +3658,19 @@ rtw89_debug_priv_fw_crash_set(struct rtw89_dev *rtwdev,
case RTW89_DBG_SIM_CTRL_ERROR:
sim = rtw89_dbg_trigger_ctrl_error;
break;
case RTW89_DBG_SIM_MAC_ERROR:
sim = rtw89_dbg_trigger_mac_error;
/* Driver SER flow won't get involved; only FW will. */
announce = false;
break;
default:
return -EINVAL;
}
set_bit(RTW89_FLAG_CRASH_SIMULATING, rtwdev->flags);
if (announce)
set_bit(RTW89_FLAG_CRASH_SIMULATING, rtwdev->flags);
ret = sim(rtwdev);
if (ret)
@ -4298,6 +4361,64 @@ rtw89_debug_priv_mlo_mode_set(struct rtw89_dev *rtwdev,
return count;
}
static ssize_t
rtw89_debug_priv_beacon_info_get(struct rtw89_dev *rtwdev,
struct rtw89_debugfs_priv *debugfs_priv,
char *buf, size_t bufsz)
{
struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.last_pkt_stat;
struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat;
struct rtw89_beacon_dist *bcn_dist = &bcn_stat->bcn_dist;
u16 upper, lower = bcn_stat->tbtt_tu_min;
char *p = buf, *end = buf + bufsz;
u16 *drift = bcn_stat->drift;
u8 bcn_num = bcn_stat->num;
u8 count;
u8 i;
p += scnprintf(p, end - p, "[Beacon info]\n");
p += scnprintf(p, end - p, "count: %u\n", pkt_stat->beacon_nr);
p += scnprintf(p, end - p, "interval: %u\n", bcn_track->beacon_int);
p += scnprintf(p, end - p, "dtim: %u\n", bcn_track->dtim);
p += scnprintf(p, end - p, "raw rssi: %lu\n",
ewma_rssi_read(&rtwdev->phystat.bcn_rssi));
p += scnprintf(p, end - p, "hw rate: %u\n", pkt_stat->beacon_rate);
p += scnprintf(p, end - p, "length: %u\n", pkt_stat->beacon_len);
p += scnprintf(p, end - p, "\n[Distribution]\n");
p += scnprintf(p, end - p, "tbtt\n");
for (i = 0; i < RTW89_BCN_TRACK_MAX_BIN_NUM; i++) {
upper = lower + RTW89_BCN_TRACK_BIN_WIDTH - 1;
if (i == RTW89_BCN_TRACK_MAX_BIN_NUM - 1)
upper = max(upper, bcn_stat->tbtt_tu_max);
p += scnprintf(p, end - p, "%02u - %02u: %u\n",
lower, upper, bcn_dist->bins[i]);
lower = upper + 1;
}
p += scnprintf(p, end - p, "\ndrift\n");
for (i = 0; i < bcn_num; i += count) {
count = 1;
while (i + count < bcn_num && drift[i] == drift[i + count])
count++;
p += scnprintf(p, end - p, "%u: %u\n", drift[i], count);
}
p += scnprintf(p, end - p, "\nlower bound: %u\n", bcn_dist->lower_bound);
p += scnprintf(p, end - p, "upper bound: %u\n", bcn_dist->upper_bound);
p += scnprintf(p, end - p, "outlier count: %u\n", bcn_dist->outlier_count);
p += scnprintf(p, end - p, "\n[Tracking]\n");
p += scnprintf(p, end - p, "tbtt offset: %u\n", bcn_track->tbtt_offset);
p += scnprintf(p, end - p, "bcn timeout: %u\n", bcn_track->bcn_timeout);
return p - buf;
}
#define rtw89_debug_priv_get(name, opts...) \
{ \
.cb_read = rtw89_debug_priv_ ##name## _get, \
@ -4356,6 +4477,7 @@ static const struct rtw89_debugfs rtw89_debugfs_templ = {
.stations = rtw89_debug_priv_get(stations, RLOCK),
.disable_dm = rtw89_debug_priv_set_and_get(disable_dm, RWLOCK),
.mlo_mode = rtw89_debug_priv_set_and_get(mlo_mode, RWLOCK),
.beacon_info = rtw89_debug_priv_get(beacon_info),
};
#define rtw89_debugfs_add(name, mode, fopname, parent) \
@ -4401,6 +4523,7 @@ void rtw89_debugfs_add_sec1(struct rtw89_dev *rtwdev, struct dentry *debugfs_top
rtw89_debugfs_add_r(stations);
rtw89_debugfs_add_rw(disable_dm);
rtw89_debugfs_add_rw(mlo_mode);
rtw89_debugfs_add_r(beacon_info);
}
void rtw89_debugfs_init(struct rtw89_dev *rtwdev)

debug.h

@ -56,6 +56,7 @@ static inline void rtw89_debugfs_deinit(struct rtw89_dev *rtwdev) {}
#endif
#define rtw89_info(rtwdev, a...) dev_info((rtwdev)->dev, ##a)
#define rtw89_info_once(rtwdev, a...) dev_info_once((rtwdev)->dev, ##a)
#define rtw89_warn(rtwdev, a...) dev_warn((rtwdev)->dev, ##a)
#define rtw89_err(rtwdev, a...) dev_err((rtwdev)->dev, ##a)

fw.c (184 changed lines)

@ -830,11 +830,13 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 127, 0, LPS_DACK_BY_C2H_REG),
__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 128, 0, CRASH_TRIGGER_TYPE_1),
__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 128, 0, SCAN_OFFLOAD_EXTRA_OP),
__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 128, 0, BEACON_TRACKING),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, NO_LPS_PG),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, TX_WAKE),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 90, 0, CRASH_TRIGGER_TYPE_0),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 91, 0, SCAN_OFFLOAD),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 110, 0, BEACON_FILTER),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 122, 0, BEACON_TRACKING),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 127, 0, SCAN_OFFLOAD_EXTRA_OP),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 127, 0, LPS_DACK_BY_C2H_REG),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 127, 0, CRASH_TRIGGER_TYPE_1),
@ -846,6 +848,9 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 80, 0, WOW_REASON_V1),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 128, 0, BEACON_LOSS_COUNT_V1),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 128, 0, LPS_DACK_BY_C2H_REG),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 128, 0, CRASH_TRIGGER_TYPE_1),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 129, 1, BEACON_TRACKING),
__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER_TYPE_0),
__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 11, 0, MACID_PAUSE_SLEEP),
__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 35, 0, SCAN_OFFLOAD),
@ -864,6 +869,7 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 71, 0, BEACON_LOSS_COUNT_V1),
__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 76, 0, LPS_DACK_BY_C2H_REG),
__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 79, 0, CRASH_TRIGGER_TYPE_1),
__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 80, 0, BEACON_TRACKING),
};
static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw,
@ -1280,6 +1286,18 @@ int rtw89_recognize_regd_from_elm(struct rtw89_dev *rtwdev,
return 0;
}
static
int rtw89_build_afe_pwr_seq_from_elm(struct rtw89_dev *rtwdev,
const struct rtw89_fw_element_hdr *elm,
const union rtw89_fw_element_arg arg)
{
struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
elm_info->afe = elm;
return 0;
}
static const struct rtw89_fw_element_handler __fw_element_handlers[] = {
[RTW89_FW_ELEMENT_ID_BBMCU0] = {__rtw89_fw_recognize_from_elm,
{ .fw_type = RTW89_FW_BBMCU0 }, NULL},
@ -1365,6 +1383,9 @@ static const struct rtw89_fw_element_handler __fw_element_handlers[] = {
[RTW89_FW_ELEMENT_ID_REGD] = {
rtw89_recognize_regd_from_elm, {}, "REGD",
},
[RTW89_FW_ELEMENT_ID_AFE_PWR_SEQ] = {
rtw89_build_afe_pwr_seq_from_elm, {}, "AFE",
},
};
int rtw89_fw_recognize_elements(struct rtw89_dev *rtwdev)
@ -1537,7 +1558,7 @@ static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev,
struct rtw89_fw_hdr *fw_hdr;
struct sk_buff *skb;
u32 truncated;
u32 ret = 0;
int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
@ -3990,6 +4011,93 @@ fail:
}
EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon_be);
int rtw89_fw_h2c_tbtt_tuning(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link, u32 offset)
{
struct rtw89_h2c_tbtt_tuning *h2c;
u32 len = sizeof(*h2c);
struct sk_buff *skb;
int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c tbtt tuning\n");
return -ENOMEM;
}
skb_put(skb, len);
h2c = (struct rtw89_h2c_tbtt_tuning *)skb->data;
h2c->w0 = le32_encode_bits(rtwvif_link->phy_idx, RTW89_H2C_TBTT_TUNING_W0_BAND) |
le32_encode_bits(rtwvif_link->port, RTW89_H2C_TBTT_TUNING_W0_PORT);
h2c->w1 = le32_encode_bits(offset, RTW89_H2C_TBTT_TUNING_W1_SHIFT);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_PS,
H2C_FUNC_TBTT_TUNING, 0, 0,
len);
ret = rtw89_h2c_tx(rtwdev, skb, false);
if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
return 0;
fail:
dev_kfree_skb_any(skb);
return ret;
}
int rtw89_fw_h2c_pwr_lvl(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link)
{
#define RTW89_BCN_TO_VAL_MIN 4
#define RTW89_BCN_TO_VAL_MAX 64
#define RTW89_DTIM_TO_VAL_MIN 7
#define RTW89_DTIM_TO_VAL_MAX 15
struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
struct rtw89_h2c_pwr_lvl *h2c;
u32 len = sizeof(*h2c);
struct sk_buff *skb;
u8 bcn_to_val;
int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c pwr lvl\n");
return -ENOMEM;
}
skb_put(skb, len);
h2c = (struct rtw89_h2c_pwr_lvl *)skb->data;
bcn_to_val = clamp_t(u8, bcn_track->bcn_timeout,
RTW89_BCN_TO_VAL_MIN, RTW89_BCN_TO_VAL_MAX);
h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_PWR_LVL_W0_MACID) |
le32_encode_bits(bcn_to_val, RTW89_H2C_PWR_LVL_W0_BCN_TO_VAL) |
le32_encode_bits(0, RTW89_H2C_PWR_LVL_W0_PS_LVL) |
le32_encode_bits(0, RTW89_H2C_PWR_LVL_W0_TRX_LVL) |
le32_encode_bits(RTW89_DTIM_TO_VAL_MIN,
RTW89_H2C_PWR_LVL_W0_DTIM_TO_VAL);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_PS,
H2C_FUNC_PS_POWER_LEVEL, 0, 0,
len);
ret = rtw89_h2c_tx(rtwdev, skb, false);
if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
return 0;
fail:
dev_kfree_skb_any(skb);
return ret;
}
int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link,
@ -6580,6 +6688,40 @@ fail:
return ret;
}
int rtw89_fw_h2c_rf_tas_trigger(struct rtw89_dev *rtwdev, bool enable)
{
struct rtw89_h2c_rf_tas *h2c;
u32 len = sizeof(*h2c);
struct sk_buff *skb;
int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c RF TAS\n");
return -ENOMEM;
}
skb_put(skb, len);
h2c = (struct rtw89_h2c_rf_tas *)skb->data;
h2c->enable = cpu_to_le32(enable);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
H2C_FUNC_RFK_TAS_OFFLOAD, 0, 0, len);
ret = rtw89_h2c_tx(rtwdev, skb, false);
if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
return 0;
fail:
dev_kfree_skb_any(skb);
return ret;
}
int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
bool rack, bool dack)
@ -6826,8 +6968,9 @@ static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev,
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_fw_info *fw_info = &rtwdev->fw;
const u32 *c2h_reg = chip->c2h_regs;
u32 ret, timeout;
u32 timeout;
u8 i, val;
int ret;
info->id = RTW89_FWCMD_C2HREG_FUNC_NULL;
@ -6865,7 +7008,7 @@ int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev,
struct rtw89_mac_h2c_info *h2c_info,
struct rtw89_mac_c2h_info *c2h_info)
{
u32 ret;
int ret;
if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE)
lockdep_assert_wiphy(rtwdev->hw->wiphy);
@ -7123,7 +7266,6 @@ static void rtw89_pno_scan_add_chan_ax(struct rtw89_dev *rtwdev,
struct rtw89_pktofld_info *info;
u8 probe_count = 0;
ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
ch_info->bw = RTW89_SCAN_WIDTH;
ch_info->tx_pkt = true;
@ -7264,7 +7406,6 @@ static void rtw89_pno_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type,
struct rtw89_pktofld_info *info;
u8 probe_count = 0, i;
ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
ch_info->bw = RTW89_SCAN_WIDTH;
ch_info->tx_null = false;
@ -7553,6 +7694,13 @@ int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
INIT_LIST_HEAD(&list);
list_for_each_entry_safe(ch_info, tmp, &scan_info->chan_list, list) {
/* The operating channel (tx_null == true) should
* not be last in the list, to avoid breaking
* RTL8851BU and RTL8832BU.
*/
if (list_len + 1 == RTW89_SCAN_LIST_LIMIT_AX && ch_info->tx_null)
break;
list_move_tail(&ch_info->list, &list);
list_len++;
@ -8585,9 +8733,10 @@ int rtw89_fw_h2c_wow_gtk_ofld(struct rtw89_dev *rtwdev,
goto fail;
}
/* not support TKIP yet */
h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GTK_OFLD_W0_EN) |
le32_encode_bits(0, RTW89_H2C_WOW_GTK_OFLD_W0_TKIP_EN) |
le32_encode_bits(!!memchr_inv(gtk_info->txmickey, 0,
sizeof(gtk_info->txmickey)),
RTW89_H2C_WOW_GTK_OFLD_W0_TKIP_EN) |
le32_encode_bits(gtk_info->igtk_keyid ? 1 : 0,
RTW89_H2C_WOW_GTK_OFLD_W0_IEEE80211W_EN) |
le32_encode_bits(macid, RTW89_H2C_WOW_GTK_OFLD_W0_MAC_ID) |
@ -8679,19 +8828,30 @@ int rtw89_fw_h2c_wow_request_aoac(struct rtw89_dev *rtwdev)
static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
struct rtw89_wait_info *wait, unsigned int cond)
{
int ret;
struct rtw89_wait_response *prep;
int ret = 0;
lockdep_assert_wiphy(rtwdev->hw->wiphy);
prep = rtw89_wait_for_cond_prep(wait, cond);
if (IS_ERR(prep))
goto out;
ret = rtw89_h2c_tx(rtwdev, skb, false);
if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
dev_kfree_skb_any(skb);
return -EBUSY;
ret = -EBUSY;
goto out;
}
if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags))
return 1;
if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) {
ret = 1;
goto out;
}
return rtw89_wait_for_cond(wait, cond);
out:
return rtw89_wait_for_cond_eval(wait, prep, ret);
}
#define H2C_ADD_MCC_LEN 16

77
fw.h
View file

@ -1602,6 +1602,28 @@ struct rtw89_h2c_bcn_upd_be {
#define RTW89_H2C_BCN_UPD_BE_W7_ECSA_OFST GENMASK(30, 16)
#define RTW89_H2C_BCN_UPD_BE_W7_PROTECTION_KEY_ID BIT(31)
struct rtw89_h2c_tbtt_tuning {
__le32 w0;
__le32 w1;
} __packed;
#define RTW89_H2C_TBTT_TUNING_W0_BAND GENMASK(3, 0)
#define RTW89_H2C_TBTT_TUNING_W0_PORT GENMASK(7, 4)
#define RTW89_H2C_TBTT_TUNING_W1_SHIFT GENMASK(31, 0)
struct rtw89_h2c_pwr_lvl {
__le32 w0;
__le32 w1;
} __packed;
#define RTW89_H2C_PWR_LVL_W0_MACID GENMASK(7, 0)
#define RTW89_H2C_PWR_LVL_W0_BCN_TO_VAL GENMASK(15, 8)
#define RTW89_H2C_PWR_LVL_W0_PS_LVL GENMASK(19, 16)
#define RTW89_H2C_PWR_LVL_W0_TRX_LVL GENMASK(23, 20)
#define RTW89_H2C_PWR_LVL_W0_BCN_TO_LVL GENMASK(27, 24)
#define RTW89_H2C_PWR_LVL_W0_DTIM_TO_VAL GENMASK(31, 28)
#define RTW89_H2C_PWR_LVL_W1_MACID_EXT GENMASK(7, 0)
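A minimal sketch (illustration only, not part of this diff) of how fw.c could pack these words with the le32_encode_bits() idiom (<linux/bitfield.h>) used for the other H2C structs; the helper name and the assumption that a wide MACID is split across W0_MACID and W1_MACID_EXT are mine:
static void example_fill_pwr_lvl(struct rtw89_h2c_pwr_lvl *h2c, u16 macid,
				 u8 ps_lvl, u8 trx_lvl)
{
	/* low 8 bits of the MACID in w0, the remainder in w1 (assumed) */
	h2c->w0 = le32_encode_bits(macid & 0xff, RTW89_H2C_PWR_LVL_W0_MACID) |
		  le32_encode_bits(ps_lvl, RTW89_H2C_PWR_LVL_W0_PS_LVL) |
		  le32_encode_bits(trx_lvl, RTW89_H2C_PWR_LVL_W0_TRX_LVL);
	h2c->w1 = le32_encode_bits(macid >> 8, RTW89_H2C_PWR_LVL_W1_MACID_EXT);
}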
struct rtw89_h2c_role_maintain {
__le32 w0;
};
@ -3962,6 +3984,7 @@ enum rtw89_fw_element_id {
RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_2GHZ = 24,
RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_5GHZ = 25,
RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_6GHZ = 26,
RTW89_FW_ELEMENT_ID_AFE_PWR_SEQ = 27,
RTW89_FW_ELEMENT_ID_NUM,
};
@ -4067,6 +4090,30 @@ struct rtw89_fw_txpwr_track_cfg {
BIT(RTW89_FW_TXPWR_TRK_TYPE_2G_CCK_A_N) | \
BIT(RTW89_FW_TXPWR_TRK_TYPE_2G_CCK_A_P))
enum rtw89_fw_afe_action {
RTW89_FW_AFE_ACTION_WRITE = 0,
RTW89_FW_AFE_ACTION_DELAY = 1,
RTW89_FW_AFE_ACTION_POLL = 2,
};
enum rtw89_fw_afe_cat {
RTW89_FW_AFE_CAT_BB = 0,
RTW89_FW_AFE_CAT_BB1 = 1,
RTW89_FW_AFE_CAT_MAC = 2,
RTW89_FW_AFE_CAT_MAC1 = 3,
RTW89_FW_AFE_CAT_AFEDIG = 4,
RTW89_FW_AFE_CAT_AFEDIG1 = 5,
};
enum rtw89_fw_afe_class {
RTW89_FW_AFE_CLASS_P0 = 0,
RTW89_FW_AFE_CLASS_P1 = 1,
RTW89_FW_AFE_CLASS_P2 = 2,
RTW89_FW_AFE_CLASS_P3 = 3,
RTW89_FW_AFE_CLASS_P4 = 4,
RTW89_FW_AFE_CLASS_CMN = 5,
};
struct rtw89_fw_element_hdr {
__le32 id; /* enum rtw89_fw_element_id */
__le32 size; /* exclude header size */
@ -4104,6 +4151,17 @@ struct rtw89_fw_element_hdr {
u8 rsvd1[3];
__le16 offset[];
} __packed rfk_log_fmt;
struct {
u8 rsvd[8];
struct rtw89_phy_afe_info {
__le32 action; /* enum rtw89_fw_afe_action */
__le32 cat; /* enum rtw89_fw_afe_cat */
__le32 class; /* enum rtw89_fw_afe_class */
__le32 addr;
__le32 mask;
__le32 val;
} __packed infos[];
} __packed afe;
struct __rtw89_fw_txpwr_element txpwr;
struct __rtw89_fw_regd_element regd;
} __packed u;
@ -4201,6 +4259,8 @@ enum rtw89_ps_h2c_func {
H2C_FUNC_MAC_LPS_PARM = 0x0,
H2C_FUNC_P2P_ACT = 0x1,
H2C_FUNC_IPS_CFG = 0x3,
H2C_FUNC_PS_POWER_LEVEL = 0x7,
H2C_FUNC_TBTT_TUNING = 0xA,
NUM_OF_RTW89_PS_H2C_FUNC,
};
@ -4370,6 +4430,7 @@ enum rtw89_rfk_offload_h2c_func {
H2C_FUNC_RFK_DACK_OFFLOAD = 0x5,
H2C_FUNC_RFK_RXDCK_OFFLOAD = 0x6,
H2C_FUNC_RFK_PRE_NOTIFY = 0x8,
H2C_FUNC_RFK_TAS_OFFLOAD = 0x9,
};
struct rtw89_fw_h2c_rf_get_mccch {
@ -4551,6 +4612,10 @@ struct rtw89_h2c_rf_rxdck_v0 {
u8 rxdck_dbg_en;
} __packed;
struct rtw89_h2c_rf_tas {
__le32 enable;
} __packed;
struct rtw89_h2c_rf_rxdck {
struct rtw89_h2c_rf_rxdck_v0 v0;
u8 is_chl_k;
@ -4683,12 +4748,16 @@ struct rtw89_c2h_rfk_report {
u8 version;
} __packed;
struct rtw89_c2h_rf_tas_info {
struct rtw89_c2h_hdr hdr;
struct rtw89_c2h_rf_tas_rpt_log {
__le32 cur_idx;
__le16 txpwr_history[20];
} __packed;
struct rtw89_c2h_rf_tas_info {
struct rtw89_c2h_hdr hdr;
struct rtw89_c2h_rf_tas_rpt_log content;
} __packed;
#define RTW89_FW_RSVD_PLE_SIZE 0x800
#define RTW89_FW_BACKTRACE_INFO_SIZE 8
@ -4750,6 +4819,9 @@ int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link);
int rtw89_fw_h2c_update_beacon_be(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link);
int rtw89_fw_h2c_tbtt_tuning(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link, u32 offset);
int rtw89_fw_h2c_pwr_lvl(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link);
int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif_link *vif,
struct rtw89_sta_link *rtwsta_link, const u8 *scan_mac_addr);
int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
@ -4826,6 +4898,7 @@ int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
const struct rtw89_chan *chan);
int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
const struct rtw89_chan *chan, bool is_chl_k);
int rtw89_fw_h2c_rf_tas_trigger(struct rtw89_dev *rtwdev, bool enable);
int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
bool rack, bool dack);

72
mac.c
View file

@ -9,6 +9,7 @@
#include "fw.h"
#include "mac.h"
#include "pci.h"
#include "phy.h"
#include "ps.h"
#include "reg.h"
#include "util.h"
@ -177,7 +178,7 @@ int rtw89_mac_dle_dfi_qempty_cfg(struct rtw89_dev *rtwdev,
struct rtw89_mac_dle_dfi_qempty *qempty)
{
struct rtw89_mac_dle_dfi_ctrl ctrl;
u32 ret;
int ret;
ctrl.type = qempty->dle_type;
ctrl.target = DLE_DFI_TYPE_QEMPTY;
@ -985,7 +986,7 @@ static int hfc_upd_ch_info(struct rtw89_dev *rtwdev, u8 ch)
struct rtw89_hfc_ch_info *info = param->ch_info;
const struct rtw89_hfc_ch_cfg *cfg = param->ch_cfg;
u32 val;
u32 ret;
int ret;
ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
if (ret)
@ -1176,8 +1177,8 @@ int rtw89_mac_hfc_init(struct rtw89_dev *rtwdev, bool reset, bool en, bool h2c_e
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
const struct rtw89_chip_info *chip = rtwdev->chip;
u32 dma_ch_mask = chip->dma_ch_mask;
int ret = 0;
u8 ch;
u32 ret = 0;
if (reset)
ret = hfc_reset_param(rtwdev);
@ -1193,7 +1194,7 @@ int rtw89_mac_hfc_init(struct rtw89_dev *rtwdev, bool reset, bool en, bool h2c_e
if (!en && h2c_en) {
mac->hfc_h2c_cfg(rtwdev);
mac->hfc_func_en(rtwdev, en, h2c_en);
return ret;
return 0;
}
for (ch = RTW89_DMA_ACH0; ch < RTW89_DMA_H2C; ch++) {
@ -2413,7 +2414,7 @@ static int addr_cam_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
static int scheduler_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
{
u32 ret;
int ret;
u32 reg;
u32 val;
@ -2954,7 +2955,7 @@ static int rtw89_mac_read_phycap(struct rtw89_dev *rtwdev,
struct rtw89_mac_h2c_info h2c_info = {};
enum rtw89_mac_c2h_type c2h_type;
u8 content_len;
u32 ret;
int ret;
if (chip->chip_gen == RTW89_CHIP_AX)
content_len = 0;
@ -3105,10 +3106,10 @@ int rtw89_mac_setup_phycap(struct rtw89_dev *rtwdev)
static int rtw89_hw_sch_tx_en_h2c(struct rtw89_dev *rtwdev, u8 band,
u16 tx_en_u16, u16 mask_u16)
{
u32 ret;
struct rtw89_mac_c2h_info c2h_info = {0};
struct rtw89_mac_h2c_info h2c_info = {0};
struct rtw89_h2creg_sch_tx_en *sch_tx_en = &h2c_info.u.sch_tx_en;
int ret;
h2c_info.id = RTW89_FWCMD_H2CREG_FUNC_SCH_TX_EN;
h2c_info.content_len = sizeof(*sch_tx_en) - RTW89_H2CREG_HDR_LEN;
@ -4197,6 +4198,7 @@ static const struct rtw89_port_reg rtw89_port_base_ax = {
.ptcl_dbg = R_AX_PTCL_DBG,
.ptcl_dbg_info = R_AX_PTCL_DBG_INFO,
.bcn_drop_all = R_AX_BCN_DROP_ALL0,
.bcn_psr_rpt = R_AX_BCN_PSR_RPT_P0,
.hiq_win = {R_AX_P0MB_HGQ_WINDOW_CFG_0, R_AX_PORT_HGQ_WINDOW_CFG,
R_AX_PORT_HGQ_WINDOW_CFG + 1, R_AX_PORT_HGQ_WINDOW_CFG + 2,
R_AX_PORT_HGQ_WINDOW_CFG + 3},
@ -4649,25 +4651,28 @@ static void rtw89_mac_port_cfg_bcn_early(struct rtw89_dev *rtwdev,
BCN_ERLY_DEF);
}
static void rtw89_mac_port_cfg_tbtt_shift(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link)
static void rtw89_mac_port_cfg_bcn_psr_rpt(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
const struct rtw89_port_reg *p = mac->port_base;
u16 val;
struct ieee80211_bss_conf *bss_conf;
u8 bssid_index;
u32 reg;
if (rtwdev->chip->chip_id != RTL8852C)
return;
rcu_read_lock();
if (rtwvif_link->wifi_role != RTW89_WIFI_ROLE_P2P_CLIENT &&
rtwvif_link->wifi_role != RTW89_WIFI_ROLE_STATION)
return;
bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
if (bss_conf->nontransmitted)
bssid_index = bss_conf->bssid_index;
else
bssid_index = 0;
val = FIELD_PREP(B_AX_TBTT_SHIFT_OFST_MAG, 1) |
B_AX_TBTT_SHIFT_OFST_SIGN;
rcu_read_unlock();
rtw89_write16_port_mask(rtwdev, rtwvif_link, p->tbtt_shift,
B_AX_TBTT_SHIFT_OFST_MASK, val);
reg = rtw89_mac_reg_by_idx(rtwdev, p->bcn_psr_rpt + rtwvif_link->port * 4,
rtwvif_link->mac_idx);
rtw89_write32_mask(rtwdev, reg, B_AX_BCAID_P0_MASK, bssid_index);
}
void rtw89_mac_port_tsf_sync(struct rtw89_dev *rtwdev,
@ -4820,13 +4825,13 @@ int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvi
rtw89_mac_port_cfg_bcn_hold_time(rtwdev, rtwvif_link);
rtw89_mac_port_cfg_bcn_mask_area(rtwdev, rtwvif_link);
rtw89_mac_port_cfg_tbtt_early(rtwdev, rtwvif_link);
rtw89_mac_port_cfg_tbtt_shift(rtwdev, rtwvif_link);
rtw89_mac_port_cfg_bss_color(rtwdev, rtwvif_link);
rtw89_mac_port_cfg_mbssid(rtwdev, rtwvif_link);
rtw89_mac_port_cfg_func_en(rtwdev, rtwvif_link, true);
rtw89_mac_port_tsf_resync_all(rtwdev);
fsleep(BCN_ERLY_SET_DLY);
rtw89_mac_port_cfg_bcn_early(rtwdev, rtwvif_link);
rtw89_mac_port_cfg_bcn_psr_rpt(rtwdev, rtwvif_link);
return 0;
}
@ -5041,6 +5046,8 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb,
if (op_chan) {
rtw89_mac_enable_aps_bcn_by_chan(rtwdev, op_chan, false);
ieee80211_stop_queues(rtwdev->hw);
} else {
rtw89_phy_nhm_get_result(rtwdev, band, chan);
}
return;
case RTW89_SCAN_END_SCAN_NOTIFY:
@ -5071,6 +5078,7 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb,
RTW89_CHANNEL_WIDTH_20);
rtw89_assign_entity_chan(rtwdev, rtwvif_link->chanctx_idx,
&new);
rtw89_phy_nhm_trigger(rtwdev);
}
break;
default:
@ -5235,6 +5243,11 @@ rtw89_mac_c2h_bcn_cnt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
}
static void
rtw89_mac_c2h_bcn_upd_done(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
}
static void
rtw89_mac_c2h_pkt_ofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb_c2h,
u32 len)
@ -5257,6 +5270,11 @@ rtw89_mac_c2h_pkt_ofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb_c2h,
rtw89_complete_cond(wait, cond, &data);
}
static void
rtw89_mac_c2h_bcn_resend(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
}
static void
rtw89_mac_c2h_tx_duty_rpt(struct rtw89_dev *rtwdev, struct sk_buff *skb_c2h, u32 len)
{
@ -5646,7 +5664,7 @@ void (* const rtw89_mac_c2h_ofld_handler[])(struct rtw89_dev *rtwdev,
[RTW89_MAC_C2H_FUNC_EFUSE_DUMP] = NULL,
[RTW89_MAC_C2H_FUNC_READ_RSP] = NULL,
[RTW89_MAC_C2H_FUNC_PKT_OFLD_RSP] = rtw89_mac_c2h_pkt_ofld_rsp,
[RTW89_MAC_C2H_FUNC_BCN_RESEND] = NULL,
[RTW89_MAC_C2H_FUNC_BCN_RESEND] = rtw89_mac_c2h_bcn_resend,
[RTW89_MAC_C2H_FUNC_MACID_PAUSE] = rtw89_mac_c2h_macid_pause,
[RTW89_MAC_C2H_FUNC_SCANOFLD_RSP] = rtw89_mac_c2h_scanofld_rsp,
[RTW89_MAC_C2H_FUNC_TX_DUTY_RPT] = rtw89_mac_c2h_tx_duty_rpt,
@ -5661,6 +5679,7 @@ void (* const rtw89_mac_c2h_info_handler[])(struct rtw89_dev *rtwdev,
[RTW89_MAC_C2H_FUNC_DONE_ACK] = rtw89_mac_c2h_done_ack,
[RTW89_MAC_C2H_FUNC_C2H_LOG] = rtw89_mac_c2h_log,
[RTW89_MAC_C2H_FUNC_BCN_CNT] = rtw89_mac_c2h_bcn_cnt,
[RTW89_MAC_C2H_FUNC_BCN_UPD_DONE] = rtw89_mac_c2h_bcn_upd_done,
};
static
@ -5813,12 +5832,11 @@ void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
case RTW89_MAC_C2H_CLASS_ROLE:
return;
default:
rtw89_info(rtwdev, "MAC c2h class %d not support\n", class);
return;
break;
}
if (!handler) {
rtw89_info(rtwdev, "MAC c2h class %d func %d not support\n", class,
func);
rtw89_info_once(rtwdev, "MAC c2h class %d func %d not support\n",
class, func);
return;
}
handler(rtwdev, skb, len);
@ -6720,7 +6738,7 @@ int rtw89_mac_set_hw_muedca_ctrl(struct rtw89_dev *rtwdev,
u8 mac_idx = rtwvif_link->mac_idx;
u16 set = mac->muedca_ctrl.mask;
u32 reg;
u32 ret;
int ret;
ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
if (ret)
@ -6862,7 +6880,7 @@ int rtw89_mac_cpu_io_rx(struct rtw89_dev *rtwdev, bool wow_enable)
{
struct rtw89_mac_h2c_info h2c_info = {};
struct rtw89_mac_c2h_info c2h_info = {};
u32 ret;
int ret;
if (RTW89_CHK_FW_FEATURE(NO_WOW_CPU_IO_RX, &rtwdev->fw))
return 0;

1
mac.h
View file

@ -419,6 +419,7 @@ enum rtw89_mac_c2h_info_func {
RTW89_MAC_C2H_FUNC_DONE_ACK,
RTW89_MAC_C2H_FUNC_C2H_LOG,
RTW89_MAC_C2H_FUNC_BCN_CNT,
RTW89_MAC_C2H_FUNC_BCN_UPD_DONE = 0x06,
RTW89_MAC_C2H_FUNC_INFO_MAX,
};

mac80211.c
View file

@ -1837,6 +1837,40 @@ static void rtw89_set_rekey_data(struct ieee80211_hw *hw,
}
#endif
static int rtw89_ops_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey)
{
struct ieee80211_conf *conf = &hw->conf;
struct rtw89_dev *rtwdev = hw->priv;
struct rtw89_bb_ctx *bb;
if (idx == 0) {
survey->channel = conf->chandef.chan;
survey->filled = SURVEY_INFO_NOISE_DBM;
survey->noise = RTW89_NOISE_DEFAULT;
return 0;
}
rtw89_for_each_active_bb(rtwdev, bb) {
struct rtw89_env_monitor_info *env = &bb->env_monitor;
struct rtw89_nhm_report *rpt;
rpt = list_first_entry_or_null(&env->nhm_rpt_list, typeof(*rpt), list);
if (!rpt)
continue;
survey->filled = SURVEY_INFO_NOISE_DBM;
survey->noise = rpt->noise - MAX_RSSI;
survey->channel = rpt->channel;
list_del_init(&rpt->list);
return 0;
}
return -EINVAL;
}
static void rtw89_ops_rfkill_poll(struct ieee80211_hw *hw)
{
struct rtw89_dev *rtwdev = hw->priv;
@ -1869,6 +1903,7 @@ const struct ieee80211_ops rtw89_ops = {
.sta_state = rtw89_ops_sta_state,
.set_key = rtw89_ops_set_key,
.ampdu_action = rtw89_ops_ampdu_action,
.get_survey = rtw89_ops_get_survey,
.set_rts_threshold = rtw89_ops_set_rts_threshold,
.sta_statistics = rtw89_ops_sta_statistics,
.flush = rtw89_ops_flush,

mac_be.c
View file

@ -56,6 +56,7 @@ static const struct rtw89_port_reg rtw89_port_base_be = {
.ptcl_dbg = R_BE_PTCL_DBG,
.ptcl_dbg_info = R_BE_PTCL_DBG_INFO,
.bcn_drop_all = R_BE_BCN_DROP_ALL0,
.bcn_psr_rpt = R_BE_BCN_PSR_RPT_P0,
.hiq_win = {R_BE_P0MB_HGQ_WINDOW_CFG_0, R_BE_PORT_HGQ_WINDOW_CFG,
R_BE_PORT_HGQ_WINDOW_CFG + 1, R_BE_PORT_HGQ_WINDOW_CFG + 2,
R_BE_PORT_HGQ_WINDOW_CFG + 3},

466
pci.c
View file

@ -134,7 +134,7 @@ static void rtw89_pci_release_fwcmd(struct rtw89_dev *rtwdev,
static void rtw89_pci_reclaim_tx_fwcmd(struct rtw89_dev *rtwdev,
struct rtw89_pci *rtwpci)
{
struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx.rings[RTW89_TXCH_CH12];
u32 cnt;
cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
@ -440,7 +440,7 @@ static int rtw89_pci_poll_rxq_dma(struct rtw89_dev *rtwdev,
int countdown = rtwdev->napi_budget_countdown;
u32 cnt;
rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RXQ];
rx_ring = &rtwpci->rx.rings[RTW89_RXCH_RXQ];
cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
if (!cnt)
@ -464,7 +464,8 @@ static void rtw89_pci_tx_status(struct rtw89_dev *rtwdev,
struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
struct ieee80211_tx_info *info;
rtw89_core_tx_wait_complete(rtwdev, skb_data, tx_status == RTW89_TX_DONE);
if (rtw89_core_tx_wait_complete(rtwdev, skb_data, tx_status == RTW89_TX_DONE))
return;
info = IEEE80211_SKB_CB(skb);
ieee80211_tx_info_clear_status(info);
@ -568,31 +569,52 @@ static void rtw89_pci_release_txwd_skb(struct rtw89_dev *rtwdev,
rtw89_pci_enqueue_txwd(tx_ring, txwd);
}
static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev,
struct rtw89_pci_rpp_fmt *rpp)
void rtw89_pci_parse_rpp(struct rtw89_dev *rtwdev, void *_rpp,
struct rtw89_pci_rpp_info *rpp_info)
{
const struct rtw89_pci_rpp_fmt *rpp = _rpp;
rpp_info->seq = le32_get_bits(rpp->dword, RTW89_PCI_RPP_SEQ);
rpp_info->qsel = le32_get_bits(rpp->dword, RTW89_PCI_RPP_QSEL);
rpp_info->tx_status = le32_get_bits(rpp->dword, RTW89_PCI_RPP_TX_STATUS);
rpp_info->txch = rtw89_chip_get_ch_dma(rtwdev, rpp_info->qsel);
}
EXPORT_SYMBOL(rtw89_pci_parse_rpp);
void rtw89_pci_parse_rpp_v1(struct rtw89_dev *rtwdev, void *_rpp,
struct rtw89_pci_rpp_info *rpp_info)
{
const struct rtw89_pci_rpp_fmt_v1 *rpp = _rpp;
rpp_info->seq = le32_get_bits(rpp->w0, RTW89_PCI_RPP_W0_PCIE_SEQ_V1_MASK);
rpp_info->qsel = le32_get_bits(rpp->w1, RTW89_PCI_RPP_W1_QSEL_V1_MASK);
rpp_info->tx_status = le32_get_bits(rpp->w0, RTW89_PCI_RPP_W0_TX_STATUS_V1_MASK);
rpp_info->txch = le32_get_bits(rpp->w0, RTW89_PCI_RPP_W0_DMA_CH_MASK);
}
EXPORT_SYMBOL(rtw89_pci_parse_rpp_v1);
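How a chip hooks up these parsers is an assumption here (the wiring is not shown in this hunk), but the new rtw89_pci_info fields added in pci.h below suggest per-chip initializers roughly of this shape:
	/* chips using the legacy single-dword release report */
	.rpp_fmt_size = sizeof(struct rtw89_pci_rpp_fmt),
	.parse_rpp = rtw89_pci_parse_rpp,
	/* chips using the two-dword v1 release report */
	.rpp_fmt_size = sizeof(struct rtw89_pci_rpp_fmt_v1),
	.parse_rpp = rtw89_pci_parse_rpp_v1,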
static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev, void *rpp)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
struct rtw89_pci_tx_ring *tx_ring;
const struct rtw89_pci_info *info = rtwdev->pci_info;
struct rtw89_pci_rpp_info rpp_info = {};
struct rtw89_pci_tx_wd_ring *wd_ring;
struct rtw89_pci_tx_ring *tx_ring;
struct rtw89_pci_tx_wd *txwd;
u16 seq;
u8 qsel, tx_status, txch;
seq = le32_get_bits(rpp->dword, RTW89_PCI_RPP_SEQ);
qsel = le32_get_bits(rpp->dword, RTW89_PCI_RPP_QSEL);
tx_status = le32_get_bits(rpp->dword, RTW89_PCI_RPP_TX_STATUS);
txch = rtw89_core_get_ch_dma(rtwdev, qsel);
info->parse_rpp(rtwdev, rpp, &rpp_info);
if (txch == RTW89_TXCH_CH12) {
if (rpp_info.txch == RTW89_TXCH_CH12) {
rtw89_warn(rtwdev, "should no fwcmd release report\n");
return;
}
tx_ring = &rtwpci->tx_rings[txch];
tx_ring = &rtwpci->tx.rings[rpp_info.txch];
wd_ring = &tx_ring->wd_ring;
txwd = &wd_ring->pages[seq];
txwd = &wd_ring->pages[rpp_info.seq];
rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, seq, tx_status);
rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, rpp_info.seq,
rpp_info.tx_status);
}
static void rtw89_pci_release_pending_txwd_skb(struct rtw89_dev *rtwdev,
@ -617,13 +639,14 @@ static u32 rtw89_pci_release_tx_skbs(struct rtw89_dev *rtwdev,
u32 max_cnt)
{
struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
struct rtw89_pci_rx_info *rx_info;
struct rtw89_pci_rpp_fmt *rpp;
const struct rtw89_pci_info *info = rtwdev->pci_info;
struct rtw89_rx_desc_info desc_info = {};
struct rtw89_pci_rx_info *rx_info;
struct sk_buff *skb;
u32 cnt = 0;
u32 rpp_size = sizeof(struct rtw89_pci_rpp_fmt);
void *rpp;
u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
u32 rpp_size = info->rpp_fmt_size;
u32 cnt = 0;
u32 skb_idx;
u32 offset;
int ret;
@ -649,7 +672,7 @@ static u32 rtw89_pci_release_tx_skbs(struct rtw89_dev *rtwdev,
/* first segment has RX desc */
offset = desc_info.offset + desc_info.rxd_len;
for (; offset + rpp_size <= rx_info->len; offset += rpp_size) {
rpp = (struct rtw89_pci_rpp_fmt *)(skb->data + offset);
rpp = skb->data + offset;
rtw89_pci_release_rpp(rtwdev, rpp);
}
@ -694,7 +717,7 @@ static int rtw89_pci_poll_rpq_dma(struct rtw89_dev *rtwdev,
u32 cnt;
int work_done;
rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];
rx_ring = &rtwpci->rx.rings[RTW89_RXCH_RPQ];
spin_lock_bh(&rtwpci->trx_lock);
@ -724,7 +747,7 @@ static void rtw89_pci_isr_rxd_unavail(struct rtw89_dev *rtwdev,
int i;
for (i = 0; i < RTW89_RXCH_NUM; i++) {
rx_ring = &rtwpci->rx_rings[i];
rx_ring = &rtwpci->rx.rings[i];
bd_ring = &rx_ring->bd_ring;
reg_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
@ -797,6 +820,29 @@ void rtw89_pci_recognize_intrs_v2(struct rtw89_dev *rtwdev,
}
EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v2);
void rtw89_pci_recognize_intrs_v3(struct rtw89_dev *rtwdev,
struct rtw89_pci *rtwpci,
struct rtw89_pci_isrs *isrs)
{
isrs->ind_isrs = rtw89_read32(rtwdev, R_BE_PCIE_HISR) & rtwpci->ind_intrs;
isrs->halt_c2h_isrs = isrs->ind_isrs & B_BE_HS0ISR_IND_INT ?
rtw89_read32(rtwdev, R_BE_HISR0) & rtwpci->halt_c2h_intrs : 0;
isrs->isrs[1] = rtw89_read32(rtwdev, R_BE_PCIE_DMA_ISR) & rtwpci->intrs[1];
/* isrs[0] is not used, so borrow to store RDU status to share common
* flow in rtw89_pci_interrupt_threadfn().
*/
isrs->isrs[0] = isrs->isrs[1] & (B_BE_PCIE_RDU_CH1_INT |
B_BE_PCIE_RDU_CH0_INT);
if (isrs->halt_c2h_isrs)
rtw89_write32(rtwdev, R_BE_HISR0, isrs->halt_c2h_isrs);
if (isrs->isrs[1])
rtw89_write32(rtwdev, R_BE_PCIE_DMA_ISR, isrs->isrs[1]);
rtw89_write32(rtwdev, R_BE_PCIE_HISR, isrs->ind_isrs);
}
EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v3);
void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
@ -844,6 +890,21 @@ void rtw89_pci_disable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpc
}
EXPORT_SYMBOL(rtw89_pci_disable_intr_v2);
void rtw89_pci_enable_intr_v3(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
rtw89_write32(rtwdev, R_BE_HIMR0, rtwpci->halt_c2h_intrs);
rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, rtwpci->intrs[1]);
rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, rtwpci->ind_intrs);
}
EXPORT_SYMBOL(rtw89_pci_enable_intr_v3);
void rtw89_pci_disable_intr_v3(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, 0);
rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, 0);
}
EXPORT_SYMBOL(rtw89_pci_disable_intr_v3);
static void rtw89_pci_ops_recovery_start(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
@ -885,7 +946,7 @@ static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
struct rtw89_dev *rtwdev = dev;
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
const struct rtw89_pci_info *info = rtwdev->pci_info;
const struct rtw89_pci_gen_def *gen_def = info->gen_def;
const struct rtw89_pci_isr_def *isr_def = info->isr_def;
struct rtw89_pci_isrs isrs;
unsigned long flags;
@ -893,13 +954,13 @@ static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
rtw89_chip_recognize_intrs(rtwdev, rtwpci, &isrs);
spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
if (unlikely(isrs.isrs[0] & gen_def->isr_rdu))
if (unlikely(isrs.isrs[0] & isr_def->isr_rdu))
rtw89_pci_isr_rxd_unavail(rtwdev, rtwpci);
if (unlikely(isrs.halt_c2h_isrs & gen_def->isr_halt_c2h))
if (unlikely(isrs.halt_c2h_isrs & isr_def->isr_halt_c2h))
rtw89_ser_notify(rtwdev, rtw89_mac_get_err_status(rtwdev));
if (unlikely(isrs.halt_c2h_isrs & gen_def->isr_wdt_timeout))
if (unlikely(isrs.halt_c2h_isrs & isr_def->isr_wdt_timeout))
rtw89_ser_notify(rtwdev, MAC_AX_ERR_L2_ERR_WDT_TIMEOUT_INT);
if (unlikely(rtwpci->under_recovery))
@ -950,6 +1011,24 @@ exit:
return irqret;
}
#define DEF_TXCHADDRS_TYPE3(gen, ch_idx, txch, v...) \
[RTW89_TXCH_##ch_idx] = { \
.num = R_##gen##_##txch##_TXBD_CFG, \
.idx = R_##gen##_##txch##_TXBD_IDX ##v, \
.bdram = 0, \
.desa_l = 0, \
.desa_h = 0, \
}
#define DEF_TXCHADDRS_TYPE3_GRP_BASE(gen, ch_idx, txch, grp, v...) \
[RTW89_TXCH_##ch_idx] = { \
.num = R_##gen##_##txch##_TXBD_CFG, \
.idx = R_##gen##_##txch##_TXBD_IDX ##v, \
.bdram = 0, \
.desa_l = R_##gen##_##grp##_TXBD_DESA_L, \
.desa_h = R_##gen##_##grp##_TXBD_DESA_H, \
}
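For illustration (derived from the macro text and the R_BE_* registers defined later in pci.h, not extra code in this diff), DEF_TXCHADDRS_TYPE3_GRP_BASE(BE, ACH0, CH0, ACQ, _V1) expands to:
	[RTW89_TXCH_ACH0] = {
		.num = R_BE_CH0_TXBD_CFG,
		.idx = R_BE_CH0_TXBD_IDX_V1,
		.bdram = 0,
		.desa_l = R_BE_ACQ_TXBD_DESA_L,
		.desa_h = R_BE_ACQ_TXBD_DESA_H,
	},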
#define DEF_TXCHADDRS_TYPE2(gen, ch_idx, txch, v...) \
[RTW89_TXCH_##ch_idx] = { \
.num = R_##gen##_##txch##_TXBD_NUM ##v, \
@ -977,6 +1056,22 @@ exit:
.desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
}
#define DEF_RXCHADDRS_TYPE3(gen, ch_idx, rxch, v...) \
[RTW89_RXCH_##ch_idx] = { \
.num = R_##gen##_RX_##rxch##_RXBD_CONFIG, \
.idx = R_##gen##_##ch_idx##0_RXBD_IDX ##v, \
.desa_l = 0, \
.desa_h = 0, \
}
#define DEF_RXCHADDRS_TYPE3_GRP_BASE(gen, ch_idx, rxch, grp, v...) \
[RTW89_RXCH_##ch_idx] = { \
.num = R_##gen##_RX_##rxch##_RXBD_CONFIG, \
.idx = R_##gen##_##ch_idx##0_RXBD_IDX ##v, \
.desa_l = R_##gen##_##grp##_RXBD_DESA_L, \
.desa_h = R_##gen##_##grp##_RXBD_DESA_H, \
}
#define DEF_RXCHADDRS(gen, ch_idx, rxch, v...) \
[RTW89_RXCH_##ch_idx] = { \
.num = R_##gen##_##rxch##_RXBD_NUM ##v, \
@ -1054,8 +1149,36 @@ const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_be = {
};
EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_be);
const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_be_v1 = {
.tx = {
DEF_TXCHADDRS_TYPE3_GRP_BASE(BE, ACH0, CH0, ACQ, _V1),
/* no CH1 */
DEF_TXCHADDRS_TYPE3(BE, ACH2, CH2, _V1),
/* no CH3 */
DEF_TXCHADDRS_TYPE3(BE, ACH4, CH4, _V1),
/* no CH5 */
DEF_TXCHADDRS_TYPE3(BE, ACH6, CH6, _V1),
/* no CH7 */
DEF_TXCHADDRS_TYPE3_GRP_BASE(BE, CH8, CH8, NACQ, _V1),
/* no CH9 */
DEF_TXCHADDRS_TYPE3(BE, CH10, CH10, _V1),
/* no CH11 */
DEF_TXCHADDRS_TYPE3(BE, CH12, CH12, _V1),
},
.rx = {
DEF_RXCHADDRS_TYPE3_GRP_BASE(BE, RXQ, CH0, HOST0, _V1),
DEF_RXCHADDRS_TYPE3(BE, RPQ, CH1, _V1),
},
};
EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_be_v1);
#undef DEF_TXCHADDRS_TYPE3
#undef DEF_TXCHADDRS_TYPE3_GRP_BASE
#undef DEF_TXCHADDRS_TYPE2
#undef DEF_TXCHADDRS_TYPE1
#undef DEF_TXCHADDRS
#undef DEF_RXCHADDRS_TYPE3
#undef DEF_RXCHADDRS_TYPE3_GRP_BASE
#undef DEF_RXCHADDRS
static int rtw89_pci_get_txch_addrs(struct rtw89_dev *rtwdev,
@ -1101,7 +1224,7 @@ static
u32 __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx.rings[RTW89_TXCH_CH12];
u32 cnt;
spin_lock_bh(&rtwpci->trx_lock);
@ -1117,7 +1240,7 @@ u32 __rtw89_pci_check_and_reclaim_tx_resource_noio(struct rtw89_dev *rtwdev,
u8 txch)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx.rings[txch];
struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
u32 cnt;
@ -1134,7 +1257,7 @@ static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
u8 txch)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx.rings[txch];
struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
const struct rtw89_chip_info *chip = rtwdev->chip;
u32 bd_cnt, wd_cnt, min_cnt = 0;
@ -1142,7 +1265,7 @@ static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
enum rtw89_debug_mask debug_mask;
u32 cnt;
rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];
rx_ring = &rtwpci->rx.rings[RTW89_RXCH_RPQ];
spin_lock_bh(&rtwpci->trx_lock);
bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
@ -1227,7 +1350,7 @@ static void rtw89_pci_tx_bd_ring_update(struct rtw89_dev *rtwdev, struct rtw89_p
static void rtw89_pci_ops_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx.rings[txch];
if (rtwdev->hci.paused) {
set_bit(txch, rtwpci->kick_map);
@ -1247,7 +1370,7 @@ static void rtw89_pci_tx_kick_off_pending(struct rtw89_dev *rtwdev)
if (!test_and_clear_bit(txch, rtwpci->kick_map))
continue;
tx_ring = &rtwpci->tx_rings[txch];
tx_ring = &rtwpci->tx.rings[txch];
__rtw89_pci_tx_kick_off(rtwdev, tx_ring);
}
}
@ -1255,7 +1378,7 @@ static void rtw89_pci_tx_kick_off_pending(struct rtw89_dev *rtwdev)
static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx.rings[txch];
struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
u32 cur_idx, cur_rp;
u8 i;
@ -1371,7 +1494,6 @@ static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev,
struct pci_dev *pdev = rtwpci->pdev;
struct sk_buff *skb = tx_req->skb;
struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
bool en_wd_info = desc_info->en_wd_info;
u32 txwd_len;
u32 txwp_len;
@ -1387,7 +1509,6 @@ static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev,
}
tx_data->dma = dma;
rcu_assign_pointer(skb_data->wait, NULL);
txwp_len = sizeof(*txwp_info);
txwd_len = chip->txwd_body_size;
@ -1521,7 +1642,7 @@ static int rtw89_pci_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_req
return -EINVAL;
}
tx_ring = &rtwpci->tx_rings[txch];
tx_ring = &rtwpci->tx.rings[txch];
spin_lock_bh(&rtwpci->trx_lock);
n_avail_txbd = rtw89_pci_get_avail_txbd_num(tx_ring);
@ -1607,6 +1728,41 @@ static void rtw89_pci_init_wp_16sel(struct rtw89_dev *rtwdev)
}
}
static u16 rtw89_pci_enc_bd_cfg(struct rtw89_dev *rtwdev, u16 bd_num,
u32 dma_offset)
{
u16 dma_offset_sel;
u16 num_sel;
/* B_BE_TX_NUM_SEL_MASK, B_BE_RX_NUM_SEL_MASK:
* 0 -> 0
* 1 -> 64 = 2^6
* 2 -> 128 = 2^7
* ...
* 7 -> 4096 = 2^12
*/
num_sel = ilog2(bd_num) - 5;
if (hweight16(bd_num) != 1)
rtw89_warn(rtwdev, "bd_num %u is not power of 2\n", bd_num);
/* B_BE_TX_START_OFFSET_MASK, B_BE_RX_START_OFFSET_MASK:
* 0 -> 0 = 0 * 2^9
* 1 -> 512 = 1 * 2^9
* 2 -> 1024 = 2 * 2^9
* 3 -> 1536 = 3 * 2^9
* ...
* 255 -> 130560 = 255 * 2^9
*/
dma_offset_sel = dma_offset >> 9;
if (dma_offset % 512)
rtw89_warn(rtwdev, "offset %u is not multiple of 512\n", dma_offset);
return u16_encode_bits(num_sel, B_BE_TX_NUM_SEL_MASK) |
u16_encode_bits(dma_offset_sel, B_BE_TX_START_OFFSET_MASK);
}
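A worked example with illustrative numbers (not part of the change): a ring of 256 descriptors placed 4096 bytes past its group base encodes as
	/* num_sel        = ilog2(256) - 5 = 3   (256 = 2^8 entries)
	 * dma_offset_sel = 4096 >> 9      = 8   (8 units of 512 bytes)
	 *
	 * so rtw89_pci_enc_bd_cfg(rtwdev, 256, 4096) returns
	 *	u16_encode_bits(3, B_BE_TX_NUM_SEL_MASK) |
	 *	u16_encode_bits(8, B_BE_TX_START_OFFSET_MASK)
	 */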
static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
@ -1616,10 +1772,12 @@ static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
struct rtw89_pci_rx_ring *rx_ring;
struct rtw89_pci_dma_ring *bd_ring;
const struct rtw89_pci_bd_ram *bd_ram;
dma_addr_t group_dma_base = 0;
u16 num_or_offset;
u32 addr_desa_l;
u32 addr_bdram;
u32 addr_num;
u32 addr_idx;
u32 addr_bdram;
u32 addr_desa_l;
u32 val32;
int i;
@ -1627,7 +1785,7 @@ static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
if (info->tx_dma_ch_mask & BIT(i))
continue;
tx_ring = &rtwpci->tx_rings[i];
tx_ring = &rtwpci->tx.rings[i];
bd_ring = &tx_ring->bd_ring;
bd_ram = bd_ram_table ? &bd_ram_table[i] : NULL;
addr_num = bd_ring->addr.num;
@ -1636,7 +1794,18 @@ static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
bd_ring->wp = 0;
bd_ring->rp = 0;
rtw89_write16(rtwdev, addr_num, bd_ring->len);
if (info->group_bd_addr) {
if (addr_desa_l)
group_dma_base = bd_ring->dma;
num_or_offset =
rtw89_pci_enc_bd_cfg(rtwdev, bd_ring->len,
bd_ring->dma - group_dma_base);
} else {
num_or_offset = bd_ring->len;
}
rtw89_write16(rtwdev, addr_num, num_or_offset);
if (addr_bdram && bd_ram) {
val32 = FIELD_PREP(BDRAM_SIDX_MASK, bd_ram->start_idx) |
FIELD_PREP(BDRAM_MAX_MASK, bd_ram->max_num) |
@ -1644,12 +1813,14 @@ static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
rtw89_write32(rtwdev, addr_bdram, val32);
}
rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma));
if (addr_desa_l) {
rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma));
}
}
for (i = 0; i < RTW89_RXCH_NUM; i++) {
rx_ring = &rtwpci->rx_rings[i];
rx_ring = &rtwpci->rx.rings[i];
bd_ring = &rx_ring->bd_ring;
addr_num = bd_ring->addr.num;
addr_idx = bd_ring->addr.idx;
@ -1663,9 +1834,22 @@ static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
rx_ring->diliver_desc.ready = false;
rx_ring->target_rx_tag = 0;
rtw89_write16(rtwdev, addr_num, bd_ring->len);
rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma));
if (info->group_bd_addr) {
if (addr_desa_l)
group_dma_base = bd_ring->dma;
num_or_offset =
rtw89_pci_enc_bd_cfg(rtwdev, bd_ring->len,
bd_ring->dma - group_dma_base);
} else {
num_or_offset = bd_ring->len;
}
rtw89_write16(rtwdev, addr_num, num_or_offset);
if (addr_desa_l) {
rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma));
}
if (info->rx_ring_eq_is_full)
rtw89_write16(rtwdev, addr_idx, bd_ring->wp);
@ -1698,7 +1882,7 @@ void rtw89_pci_ops_reset(struct rtw89_dev *rtwdev)
skb_queue_len(&rtwpci->h2c_queue), true);
continue;
}
rtw89_pci_release_tx_ring(rtwdev, &rtwpci->tx_rings[txch]);
rtw89_pci_release_tx_ring(rtwdev, &rtwpci->tx.rings[txch]);
}
spin_unlock_bh(&rtwpci->trx_lock);
}
@ -1774,14 +1958,14 @@ void rtw89_pci_switch_bd_idx_addr(struct rtw89_dev *rtwdev, bool low_power)
return;
for (i = 0; i < RTW89_TXCH_NUM; i++) {
tx_ring = &rtwpci->tx_rings[i];
tx_ring = &rtwpci->tx.rings[i];
tx_ring->bd_ring.addr.idx = low_power ?
bd_idx_addr->tx_bd_addrs[i] :
dma_addr_set->tx[i].idx;
}
for (i = 0; i < RTW89_RXCH_NUM; i++) {
rx_ring = &rtwpci->rx_rings[i];
rx_ring = &rtwpci->rx.rings[i];
rx_ring->bd_ring.addr.idx = low_power ?
bd_idx_addr->rx_bd_addrs[i] :
dma_addr_set->rx[i].idx;
@ -2725,7 +2909,7 @@ static int rtw89_pci_poll_rxdma_ch_idle_ax(struct rtw89_dev *rtwdev)
static int rtw89_pci_poll_dma_all_idle(struct rtw89_dev *rtwdev)
{
u32 ret;
int ret;
ret = rtw89_pci_poll_txdma_ch_idle_ax(rtwdev);
if (ret) {
@ -3211,15 +3395,6 @@ static void rtw89_pci_free_tx_ring(struct rtw89_dev *rtwdev,
struct pci_dev *pdev,
struct rtw89_pci_tx_ring *tx_ring)
{
int ring_sz;
u8 *head;
dma_addr_t dma;
head = tx_ring->bd_ring.head;
dma = tx_ring->bd_ring.dma;
ring_sz = tx_ring->bd_ring.desc_size * tx_ring->bd_ring.len;
dma_free_coherent(&pdev->dev, ring_sz, head, dma);
tx_ring->bd_ring.head = NULL;
}
@ -3227,6 +3402,7 @@ static void rtw89_pci_free_tx_rings(struct rtw89_dev *rtwdev,
struct pci_dev *pdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
struct rtw89_pci_dma_pool *bd_pool = &rtwpci->tx.bd_pool;
const struct rtw89_pci_info *info = rtwdev->pci_info;
struct rtw89_pci_tx_ring *tx_ring;
int i;
@ -3234,10 +3410,12 @@ static void rtw89_pci_free_tx_rings(struct rtw89_dev *rtwdev,
for (i = 0; i < RTW89_TXCH_NUM; i++) {
if (info->tx_dma_ch_mask & BIT(i))
continue;
tx_ring = &rtwpci->tx_rings[i];
tx_ring = &rtwpci->tx.rings[i];
rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring);
rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring);
}
dma_free_coherent(&pdev->dev, bd_pool->size, bd_pool->head, bd_pool->dma);
}
static void rtw89_pci_free_rx_ring(struct rtw89_dev *rtwdev,
@ -3248,8 +3426,6 @@ static void rtw89_pci_free_rx_ring(struct rtw89_dev *rtwdev,
struct sk_buff *skb;
dma_addr_t dma;
u32 buf_sz;
u8 *head;
int ring_sz = rx_ring->bd_ring.desc_size * rx_ring->bd_ring.len;
int i;
buf_sz = rx_ring->buf_sz;
@ -3265,10 +3441,6 @@ static void rtw89_pci_free_rx_ring(struct rtw89_dev *rtwdev,
rx_ring->buf[i] = NULL;
}
head = rx_ring->bd_ring.head;
dma = rx_ring->bd_ring.dma;
dma_free_coherent(&pdev->dev, ring_sz, head, dma);
rx_ring->bd_ring.head = NULL;
}
@ -3276,13 +3448,16 @@ static void rtw89_pci_free_rx_rings(struct rtw89_dev *rtwdev,
struct pci_dev *pdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
struct rtw89_pci_dma_pool *bd_pool = &rtwpci->rx.bd_pool;
struct rtw89_pci_rx_ring *rx_ring;
int i;
for (i = 0; i < RTW89_RXCH_NUM; i++) {
rx_ring = &rtwpci->rx_rings[i];
rx_ring = &rtwpci->rx.rings[i];
rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring);
}
dma_free_coherent(&pdev->dev, bd_pool->size, bd_pool->head, bd_pool->dma);
}
static void rtw89_pci_free_trx_rings(struct rtw89_dev *rtwdev,
@ -3374,12 +3549,10 @@ static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev,
struct pci_dev *pdev,
struct rtw89_pci_tx_ring *tx_ring,
u32 desc_size, u32 len,
enum rtw89_tx_channel txch)
enum rtw89_tx_channel txch,
void *head, dma_addr_t dma)
{
const struct rtw89_pci_ch_dma_addr *txch_addr;
int ring_sz = desc_size * len;
u8 *head;
dma_addr_t dma;
int ret;
ret = rtw89_pci_alloc_tx_wd_ring(rtwdev, pdev, tx_ring, txch);
@ -3394,12 +3567,6 @@ static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev,
goto err_free_wd_ring;
}
head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
if (!head) {
ret = -ENOMEM;
goto err_free_wd_ring;
}
INIT_LIST_HEAD(&tx_ring->busy_pages);
tx_ring->bd_ring.head = head;
tx_ring->bd_ring.dma = dma;
@ -3422,25 +3589,48 @@ static int rtw89_pci_alloc_tx_rings(struct rtw89_dev *rtwdev,
struct pci_dev *pdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
struct rtw89_pci_dma_pool *bd_pool = &rtwpci->tx.bd_pool;
const struct rtw89_pci_info *info = rtwdev->pci_info;
struct rtw89_pci_tx_ring *tx_ring;
u32 desc_size;
u32 len;
u32 i, tx_allocated;
dma_addr_t dma;
u32 desc_size;
u32 ring_sz;
u32 pool_sz;
u32 ch_num;
void *head;
u32 len;
int ret;
BUILD_BUG_ON(RTW89_PCI_TXBD_NUM_MAX % 16);
desc_size = sizeof(struct rtw89_pci_tx_bd_32);
len = RTW89_PCI_TXBD_NUM_MAX;
ch_num = RTW89_TXCH_NUM - hweight32(info->tx_dma_ch_mask);
ring_sz = desc_size * len;
pool_sz = ring_sz * ch_num;
head = dma_alloc_coherent(&pdev->dev, pool_sz, &dma, GFP_KERNEL);
if (!head)
return -ENOMEM;
bd_pool->head = head;
bd_pool->dma = dma;
bd_pool->size = pool_sz;
for (i = 0; i < RTW89_TXCH_NUM; i++) {
if (info->tx_dma_ch_mask & BIT(i))
continue;
tx_ring = &rtwpci->tx_rings[i];
desc_size = sizeof(struct rtw89_pci_tx_bd_32);
len = RTW89_PCI_TXBD_NUM_MAX;
tx_ring = &rtwpci->tx.rings[i];
ret = rtw89_pci_alloc_tx_ring(rtwdev, pdev, tx_ring,
desc_size, len, i);
desc_size, len, i, head, dma);
if (ret) {
rtw89_err(rtwdev, "failed to alloc tx ring %d\n", i);
goto err_free;
}
head += ring_sz;
dma += ring_sz;
}
return 0;
@ -3448,24 +3638,24 @@ static int rtw89_pci_alloc_tx_rings(struct rtw89_dev *rtwdev,
err_free:
tx_allocated = i;
for (i = 0; i < tx_allocated; i++) {
tx_ring = &rtwpci->tx_rings[i];
tx_ring = &rtwpci->tx.rings[i];
rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring);
}
dma_free_coherent(&pdev->dev, bd_pool->size, bd_pool->head, bd_pool->dma);
return ret;
}
static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev,
struct pci_dev *pdev,
struct rtw89_pci_rx_ring *rx_ring,
u32 desc_size, u32 len, u32 rxch)
u32 desc_size, u32 len, u32 rxch,
void *head, dma_addr_t dma)
{
const struct rtw89_pci_info *info = rtwdev->pci_info;
const struct rtw89_pci_ch_dma_addr *rxch_addr;
struct sk_buff *skb;
u8 *head;
dma_addr_t dma;
int ring_sz = desc_size * len;
int buf_sz = RTW89_PCI_RX_BUF_SIZE;
int i, allocated;
int ret;
@ -3476,12 +3666,6 @@ static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev,
return ret;
}
head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
if (!head) {
ret = -ENOMEM;
goto err;
}
rx_ring->bd_ring.head = head;
rx_ring->bd_ring.dma = dma;
rx_ring->bd_ring.len = len;
@ -3530,12 +3714,8 @@ err_free:
rx_ring->buf[i] = NULL;
}
head = rx_ring->bd_ring.head;
dma = rx_ring->bd_ring.dma;
dma_free_coherent(&pdev->dev, ring_sz, head, dma);
rx_ring->bd_ring.head = NULL;
err:
return ret;
}
@ -3543,22 +3723,43 @@ static int rtw89_pci_alloc_rx_rings(struct rtw89_dev *rtwdev,
struct pci_dev *pdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
struct rtw89_pci_dma_pool *bd_pool = &rtwpci->rx.bd_pool;
struct rtw89_pci_rx_ring *rx_ring;
u32 desc_size;
u32 len;
int i, rx_allocated;
dma_addr_t dma;
u32 desc_size;
u32 ring_sz;
u32 pool_sz;
void *head;
u32 len;
int ret;
desc_size = sizeof(struct rtw89_pci_rx_bd_32);
len = RTW89_PCI_RXBD_NUM_MAX;
ring_sz = desc_size * len;
pool_sz = ring_sz * RTW89_RXCH_NUM;
head = dma_alloc_coherent(&pdev->dev, pool_sz, &dma, GFP_KERNEL);
if (!head)
return -ENOMEM;
bd_pool->head = head;
bd_pool->dma = dma;
bd_pool->size = pool_sz;
for (i = 0; i < RTW89_RXCH_NUM; i++) {
rx_ring = &rtwpci->rx_rings[i];
desc_size = sizeof(struct rtw89_pci_rx_bd_32);
len = RTW89_PCI_RXBD_NUM_MAX;
rx_ring = &rtwpci->rx.rings[i];
ret = rtw89_pci_alloc_rx_ring(rtwdev, pdev, rx_ring,
desc_size, len, i);
desc_size, len, i,
head, dma);
if (ret) {
rtw89_err(rtwdev, "failed to alloc rx ring %d\n", i);
goto err_free;
}
head += ring_sz;
dma += ring_sz;
}
return 0;
@ -3566,10 +3767,12 @@ static int rtw89_pci_alloc_rx_rings(struct rtw89_dev *rtwdev,
err_free:
rx_allocated = i;
for (i = 0; i < rx_allocated; i++) {
rx_ring = &rtwpci->rx_rings[i];
rx_ring = &rtwpci->rx.rings[i];
rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring);
}
dma_free_coherent(&pdev->dev, bd_pool->size, bd_pool->head, bd_pool->dma);
return ret;
}
@ -3776,6 +3979,40 @@ void rtw89_pci_config_intr_mask_v2(struct rtw89_dev *rtwdev)
}
EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v2);
static void rtw89_pci_recovery_intr_mask_v3(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0;
rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN;
rtwpci->intrs[0] = 0;
rtwpci->intrs[1] = 0;
}
static void rtw89_pci_default_intr_mask_v3(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0;
rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN;
rtwpci->intrs[0] = 0;
rtwpci->intrs[1] = B_BE_PCIE_RDU_CH1_IMR |
B_BE_PCIE_RDU_CH0_IMR |
B_BE_PCIE_RX_RX0P2_IMR0_V1 |
B_BE_PCIE_RX_RPQ0_IMR0_V1;
}
void rtw89_pci_config_intr_mask_v3(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
if (rtwpci->under_recovery)
rtw89_pci_recovery_intr_mask_v3(rtwdev);
else
rtw89_pci_default_intr_mask_v3(rtwdev);
}
EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v3);
static int rtw89_pci_request_irq(struct rtw89_dev *rtwdev,
struct pci_dev *pdev)
{
@ -4158,7 +4395,7 @@ static int rtw89_pci_lv1rst_stop_dma_ax(struct rtw89_dev *rtwdev)
static int rtw89_pci_lv1rst_start_dma_ax(struct rtw89_dev *rtwdev)
{
u32 ret;
int ret;
if (rtwdev->chip->chip_id == RTL8852C)
return 0;
@ -4172,7 +4409,7 @@ static int rtw89_pci_lv1rst_start_dma_ax(struct rtw89_dev *rtwdev)
return ret;
rtw89_pci_ctrl_dma_all(rtwdev, true);
return ret;
return 0;
}
static int rtw89_pci_ops_mac_lv1_recovery(struct rtw89_dev *rtwdev,
@ -4228,18 +4465,18 @@ static int rtw89_pci_napi_poll(struct napi_struct *napi, int budget)
struct rtw89_dev *rtwdev = container_of(napi, struct rtw89_dev, napi);
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
const struct rtw89_pci_info *info = rtwdev->pci_info;
const struct rtw89_pci_gen_def *gen_def = info->gen_def;
const struct rtw89_pci_isr_def *isr_def = info->isr_def;
unsigned long flags;
int work_done;
rtwdev->napi_budget_countdown = budget;
rtw89_write32(rtwdev, gen_def->isr_clear_rpq.addr, gen_def->isr_clear_rpq.data);
rtw89_write32(rtwdev, isr_def->isr_clear_rpq.addr, isr_def->isr_clear_rpq.data);
work_done = rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown);
if (work_done == budget)
return budget;
rtw89_write32(rtwdev, gen_def->isr_clear_rxq.addr, gen_def->isr_clear_rxq.data);
rtw89_write32(rtwdev, isr_def->isr_clear_rxq.addr, isr_def->isr_clear_rxq.data);
work_done += rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown);
if (work_done < budget && napi_complete_done(napi, work_done)) {
spin_lock_irqsave(&rtwpci->irq_lock, flags);
@ -4394,14 +4631,17 @@ const struct pci_error_handlers rtw89_pci_err_handler = {
};
EXPORT_SYMBOL(rtw89_pci_err_handler);
const struct rtw89_pci_gen_def rtw89_pci_gen_ax = {
const struct rtw89_pci_isr_def rtw89_pci_isr_ax = {
.isr_rdu = B_AX_RDU_INT,
.isr_halt_c2h = B_AX_HALT_C2H_INT_EN,
.isr_wdt_timeout = B_AX_WDT_TIMEOUT_INT_EN,
.isr_clear_rpq = {R_AX_PCIE_HISR00, B_AX_RPQDMA_INT | B_AX_RPQBD_FULL_INT},
.isr_clear_rxq = {R_AX_PCIE_HISR00, B_AX_RXP1DMA_INT | B_AX_RXDMA_INT |
B_AX_RDU_INT},
};
EXPORT_SYMBOL(rtw89_pci_isr_ax);
const struct rtw89_pci_gen_def rtw89_pci_gen_ax = {
.mac_pre_init = rtw89_pci_ops_mac_pre_init_ax,
.mac_pre_deinit = rtw89_pci_ops_mac_pre_deinit_ax,
.mac_post_init = rtw89_pci_ops_mac_post_init_ax,

128
pci.h
View file

@ -372,6 +372,14 @@
#define B_BE_HS0ISR_IND_INT BIT(0)
#define R_BE_PCIE_DMA_IMR_0_V1 0x30B8
#define B_BE_PCIE_RDU_CH7_IMR BIT(31)
#define B_BE_PCIE_RDU_CH6_IMR BIT(30)
#define B_BE_PCIE_RDU_CH5_IMR BIT(29)
#define B_BE_PCIE_RDU_CH4_IMR BIT(28)
#define B_BE_PCIE_RDU_CH3_IMR BIT(27)
#define B_BE_PCIE_RDU_CH2_IMR BIT(26)
#define B_BE_PCIE_RDU_CH1_IMR BIT(25)
#define B_BE_PCIE_RDU_CH0_IMR BIT(24)
#define B_BE_PCIE_RX_RX1P1_IMR0_V1 BIT(23)
#define B_BE_PCIE_RX_RX0P1_IMR0_V1 BIT(22)
#define B_BE_PCIE_RX_ROQ1_IMR0_V1 BIT(21)
@ -397,6 +405,14 @@
#define B_BE_PCIE_TX_CH0_IMR0 BIT(0)
#define R_BE_PCIE_DMA_ISR 0x30BC
#define B_BE_PCIE_RDU_CH7_INT BIT(31)
#define B_BE_PCIE_RDU_CH6_INT BIT(30)
#define B_BE_PCIE_RDU_CH5_INT BIT(29)
#define B_BE_PCIE_RDU_CH4_INT BIT(28)
#define B_BE_PCIE_RDU_CH3_INT BIT(27)
#define B_BE_PCIE_RDU_CH2_INT BIT(26)
#define B_BE_PCIE_RDU_CH1_INT BIT(25)
#define B_BE_PCIE_RDU_CH0_INT BIT(24)
#define B_BE_PCIE_RX_RX1P1_ISR_V1 BIT(23)
#define B_BE_PCIE_RX_RX0P1_ISR_V1 BIT(22)
#define B_BE_PCIE_RX_ROQ1_ISR_V1 BIT(21)
@ -426,9 +442,13 @@
#define B_BE_RDU_CH4_INT_IMR_V1 BIT(29)
#define B_BE_RDU_CH3_INT_IMR_V1 BIT(28)
#define B_BE_RDU_CH2_INT_IMR_V1 BIT(27)
#define B_BE_RDU_CH1_INT_EN_V2 BIT(27)
#define B_BE_RDU_CH1_INT_IMR_V1 BIT(26)
#define B_BE_RDU_CH0_INT_EN_V2 BIT(26)
#define B_BE_RDU_CH0_INT_IMR_V1 BIT(25)
#define B_BE_RXDMA_STUCK_INT_EN_V2 BIT(25)
#define B_BE_RXDMA_STUCK_INT_EN_V1 BIT(24)
#define B_BE_TXDMA_STUCK_INT_EN_V2 BIT(24)
#define B_BE_TXDMA_STUCK_INT_EN_V1 BIT(23)
#define B_BE_TXDMA_CH14_INT_EN_V1 BIT(22)
#define B_BE_TXDMA_CH13_INT_EN_V1 BIT(21)
@ -459,9 +479,13 @@
#define B_BE_RDU_CH4_INT_V1 BIT(29)
#define B_BE_RDU_CH3_INT_V1 BIT(28)
#define B_BE_RDU_CH2_INT_V1 BIT(27)
#define B_BE_RDU_CH1_INT_V2 BIT(27)
#define B_BE_RDU_CH1_INT_V1 BIT(26)
#define B_BE_RDU_CH0_INT_V2 BIT(26)
#define B_BE_RDU_CH0_INT_V1 BIT(25)
#define B_BE_RXDMA_STUCK_INT_V2 BIT(25)
#define B_BE_RXDMA_STUCK_INT_V1 BIT(24)
#define B_BE_TXDMA_STUCK_INT_V2 BIT(24)
#define B_BE_TXDMA_STUCK_INT_V1 BIT(23)
#define B_BE_TXDMA_CH14_INT_V1 BIT(22)
#define B_BE_TXDMA_CH13_INT_V1 BIT(21)
@ -784,9 +808,25 @@
#define R_BE_CH13_TXBD_NUM_V1 0xB04C
#define R_BE_CH14_TXBD_NUM_V1 0xB04E
#define R_BE_CH0_TXBD_CFG 0xB030
#define R_BE_CH2_TXBD_CFG 0xB034
#define R_BE_CH4_TXBD_CFG 0xB038
#define R_BE_CH6_TXBD_CFG 0xB03C
#define R_BE_CH8_TXBD_CFG 0xB040
#define R_BE_CH10_TXBD_CFG 0xB044
#define R_BE_CH12_TXBD_CFG 0xB048
#define B_BE_TX_FLAG BIT(14)
#define B_BE_TX_START_OFFSET_MASK GENMASK(12, 4)
#define B_BE_TX_NUM_SEL_MASK GENMASK(2, 0)
#define R_BE_RXQ0_RXBD_NUM_V1 0xB050
#define R_BE_RPQ0_RXBD_NUM_V1 0xB052
#define R_BE_RX_CH0_RXBD_CONFIG 0xB050
#define R_BE_RX_CH1_RXBD_CONFIG 0xB052
#define B_BE_RX_START_OFFSET_MASK GENMASK(11, 4)
#define B_BE_RX_NUM_SEL_MASK GENMASK(2, 0)
#define R_BE_CH0_TXBD_IDX_V1 0xB100
#define R_BE_CH1_TXBD_IDX_V1 0xB104
#define R_BE_CH2_TXBD_IDX_V1 0xB108
@ -837,11 +877,25 @@
#define R_BE_CH14_TXBD_DESA_L_V1 0xB270
#define R_BE_CH14_TXBD_DESA_H_V1 0xB274
#define R_BE_ACQ_TXBD_DESA_L 0xB200
#define B_BE_TX_ACQ_DESA_L_MASK GENMASK(31, 3)
#define R_BE_ACQ_TXBD_DESA_H 0xB204
#define B_BE_TX_ACQ_DESA_H_MASK GENMASK(7, 0)
#define R_BE_NACQ_TXBD_DESA_L 0xB240
#define B_BE_TX_NACQ_DESA_L_MASK GENMASK(31, 3)
#define R_BE_NACQ_TXBD_DESA_H 0xB244
#define B_BE_TX_NACQ_DESA_H_MASK GENMASK(7, 0)
#define R_BE_RXQ0_RXBD_DESA_L_V1 0xB300
#define R_BE_RXQ0_RXBD_DESA_H_V1 0xB304
#define R_BE_RPQ0_RXBD_DESA_L_V1 0xB308
#define R_BE_RPQ0_RXBD_DESA_H_V1 0xB30C
#define R_BE_HOST0_RXBD_DESA_L 0xB300
#define B_BE_RX_HOST0_DESA_L_MASK GENMASK(31, 3)
#define R_BE_HOST0_RXBD_DESA_H 0xB304
#define B_BE_RX_HOST0_DESA_H_MASK GENMASK(7, 0)
#define R_BE_WP_ADDR_H_SEL0_3_V1 0xB420
#define R_BE_WP_ADDR_H_SEL4_7_V1 0xB424
#define R_BE_WP_ADDR_H_SEL8_11_V1 0xB428
@ -1249,7 +1303,7 @@ struct rtw89_pci_bd_idx_addr {
};
struct rtw89_pci_ch_dma_addr {
u32 num;
u32 num; /* also `offset` addr for group_bd_addr design */
u32 idx;
u32 bdram;
u32 desa_l;
@ -1267,13 +1321,15 @@ struct rtw89_pci_bd_ram {
u8 min_num;
};
struct rtw89_pci_gen_def {
struct rtw89_pci_isr_def {
u32 isr_rdu;
u32 isr_halt_c2h;
u32 isr_wdt_timeout;
struct rtw89_reg2_def isr_clear_rpq;
struct rtw89_reg2_def isr_clear_rxq;
};
struct rtw89_pci_gen_def {
int (*mac_pre_init)(struct rtw89_dev *rtwdev);
int (*mac_pre_deinit)(struct rtw89_dev *rtwdev);
int (*mac_post_init)(struct rtw89_dev *rtwdev);
@ -1309,8 +1365,16 @@ struct rtw89_pci_ssid_quirk {
unsigned long bitmap; /* bitmap of rtw89_quirks */
};
struct rtw89_pci_rpp_info {
u16 seq;
u8 qsel;
u8 tx_status;
u8 txch;
};
struct rtw89_pci_info {
const struct rtw89_pci_gen_def *gen_def;
const struct rtw89_pci_isr_def *isr_def;
enum mac_ax_bd_trunc_mode txbd_trunc_mode;
enum mac_ax_bd_trunc_mode rxbd_trunc_mode;
enum mac_ax_rxbd_mode rxbd_mode;
@ -1328,6 +1392,8 @@ struct rtw89_pci_info {
bool rx_ring_eq_is_full;
bool check_rx_tag;
bool no_rxbd_fs;
bool group_bd_addr;
u32 rpp_fmt_size;
u32 init_cfg_reg;
u32 txhci_en_bit;
@ -1357,6 +1423,8 @@ struct rtw89_pci_info {
u32 (*fill_txaddr_info)(struct rtw89_dev *rtwdev,
void *txaddr_info_addr, u32 total_len,
dma_addr_t dma, u8 *add_info_nr);
void (*parse_rpp)(struct rtw89_dev *rtwdev, void *rpp,
struct rtw89_pci_rpp_info *rpp_info);
void (*config_intr_mask)(struct rtw89_dev *rtwdev);
void (*enable_intr)(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
void (*disable_intr)(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
@ -1430,6 +1498,19 @@ struct rtw89_pci_rpp_fmt {
__le32 dword;
} __packed;
#define RTW89_PCI_RPP_W0_MACID_V1_MASK GENMASK(9, 0)
#define RTW89_PCI_RPP_W0_DMA_CH_MASK GENMASK(13, 10)
#define RTW89_PCI_RPP_W0_TX_STATUS_V1_MASK GENMASK(16, 14)
#define RTW89_PCI_RPP_W0_PCIE_SEQ_V1_MASK GENMASK(31, 17)
#define RTW89_PCI_RPP_W1_QSEL_V1_MASK GENMASK(5, 0)
#define RTW89_PCI_RPP_W1_TID_IND BIT(6)
#define RTW89_PCI_RPP_W1_CHANGE_LINK BIT(7)
struct rtw89_pci_rpp_fmt_v1 {
__le32 w0;
__le32 w1;
} __packed;
struct rtw89_pci_rx_bd_32 {
__le16 buf_size;
__le16 opt;
@ -1468,6 +1549,12 @@ struct rtw89_pci_dma_ring {
u32 rp; /* hw idx */
};
struct rtw89_pci_dma_pool {
void *head;
dma_addr_t dma;
u32 size;
};
struct rtw89_pci_tx_wd_ring {
void *head;
dma_addr_t dma;
@ -1497,6 +1584,11 @@ struct rtw89_pci_tx_ring {
u64 tx_mac_id_drop;
};
struct rtw89_pci_tx_rings {
struct rtw89_pci_tx_ring rings[RTW89_TXCH_NUM];
struct rtw89_pci_dma_pool bd_pool;
};
struct rtw89_pci_rx_ring {
struct rtw89_pci_dma_ring bd_ring;
struct sk_buff *buf[RTW89_PCI_RXBD_NUM_MAX];
@ -1506,6 +1598,11 @@ struct rtw89_pci_rx_ring {
u32 target_rx_tag:13;
};
struct rtw89_pci_rx_rings {
struct rtw89_pci_rx_ring rings[RTW89_RXCH_NUM];
struct rtw89_pci_dma_pool bd_pool;
};
struct rtw89_pci_isrs {
u32 ind_isrs;
u32 halt_c2h_isrs;
@ -1523,8 +1620,8 @@ struct rtw89_pci {
bool low_power;
bool under_recovery;
bool enable_dac;
struct rtw89_pci_tx_ring tx_rings[RTW89_TXCH_NUM];
struct rtw89_pci_rx_ring rx_rings[RTW89_RXCH_NUM];
struct rtw89_pci_tx_rings tx;
struct rtw89_pci_rx_rings rx;
struct sk_buff_head h2c_queue;
struct sk_buff_head h2c_release_queue;
DECLARE_BITMAP(kick_map, RTW89_TXCH_NUM);
@ -1537,10 +1634,7 @@ struct rtw89_pci {
static inline struct rtw89_pci_rx_info *RTW89_PCI_RX_SKB_CB(struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
BUILD_BUG_ON(sizeof(struct rtw89_pci_tx_data) >
sizeof(info->status.status_driver_data));
BUILD_BUG_ON(sizeof(struct rtw89_pci_rx_info) > sizeof(skb->cb));
return (struct rtw89_pci_rx_info *)skb->cb;
}
@ -1571,6 +1665,10 @@ static inline struct rtw89_pci_tx_data *RTW89_PCI_TX_SKB_CB(struct sk_buff *skb)
{
struct rtw89_tx_skb_data *data = RTW89_TX_SKB_CB(skb);
BUILD_BUG_ON(sizeof(struct rtw89_tx_skb_data) +
sizeof(struct rtw89_pci_tx_data) >
sizeof_field(struct ieee80211_tx_info, driver_data));
return (struct rtw89_pci_tx_data *)data->hci_priv;
}
@ -1626,8 +1724,12 @@ extern const struct pci_error_handlers rtw89_pci_err_handler;
extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set;
extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1;
extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_be;
extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_be_v1;
extern const struct rtw89_pci_bd_ram rtw89_bd_ram_table_dual[RTW89_TXCH_NUM];
extern const struct rtw89_pci_bd_ram rtw89_bd_ram_table_single[RTW89_TXCH_NUM];
extern const struct rtw89_pci_isr_def rtw89_pci_isr_ax;
extern const struct rtw89_pci_isr_def rtw89_pci_isr_be;
extern const struct rtw89_pci_isr_def rtw89_pci_isr_be_v1;
extern const struct rtw89_pci_gen_def rtw89_pci_gen_ax;
extern const struct rtw89_pci_gen_def rtw89_pci_gen_be;
@ -1646,16 +1748,23 @@ u32 rtw89_pci_fill_txaddr_info(struct rtw89_dev *rtwdev,
u32 rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev *rtwdev,
void *txaddr_info_addr, u32 total_len,
dma_addr_t dma, u8 *add_info_nr);
void rtw89_pci_parse_rpp(struct rtw89_dev *rtwdev, void *_rpp,
struct rtw89_pci_rpp_info *rpp_info);
void rtw89_pci_parse_rpp_v1(struct rtw89_dev *rtwdev, void *_rpp,
struct rtw89_pci_rpp_info *rpp_info);
void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable);
void rtw89_pci_config_intr_mask(struct rtw89_dev *rtwdev);
void rtw89_pci_config_intr_mask_v1(struct rtw89_dev *rtwdev);
void rtw89_pci_config_intr_mask_v2(struct rtw89_dev *rtwdev);
void rtw89_pci_config_intr_mask_v3(struct rtw89_dev *rtwdev);
void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
void rtw89_pci_enable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
void rtw89_pci_disable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
void rtw89_pci_enable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
void rtw89_pci_disable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
void rtw89_pci_enable_intr_v3(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
void rtw89_pci_disable_intr_v3(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev,
struct rtw89_pci *rtwpci,
struct rtw89_pci_isrs *isrs);
@ -1665,6 +1774,9 @@ void rtw89_pci_recognize_intrs_v1(struct rtw89_dev *rtwdev,
void rtw89_pci_recognize_intrs_v2(struct rtw89_dev *rtwdev,
struct rtw89_pci *rtwpci,
struct rtw89_pci_isrs *isrs);
void rtw89_pci_recognize_intrs_v3(struct rtw89_dev *rtwdev,
struct rtw89_pci *rtwpci,
struct rtw89_pci_isrs *isrs);
static inline
u32 rtw89_chip_fill_txaddr_info(struct rtw89_dev *rtwdev,

pci_be.c
View file

@ -175,10 +175,10 @@ static void rtw89_pci_clr_idx_all_be(struct rtw89_dev *rtwdev)
rtw89_write32(rtwdev, R_BE_RXBD_RWPTR_CLR1_V1,
B_BE_CLR_RXQ0_IDX | B_BE_CLR_RPQ0_IDX);
rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RXQ];
rx_ring = &rtwpci->rx.rings[RTW89_RXCH_RXQ];
rtw89_write16(rtwdev, R_BE_RXQ0_RXBD_IDX_V1, rx_ring->bd_ring.len - 1);
rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];
rx_ring = &rtwpci->rx.rings[RTW89_RXCH_RPQ];
rtw89_write16(rtwdev, R_BE_RPQ0_RXBD_IDX_V1, rx_ring->bd_ring.len - 1);
}
@ -665,13 +665,25 @@ static int __maybe_unused rtw89_pci_resume_be(struct device *dev)
SIMPLE_DEV_PM_OPS(rtw89_pm_ops_be, rtw89_pci_suspend_be, rtw89_pci_resume_be);
EXPORT_SYMBOL(rtw89_pm_ops_be);
const struct rtw89_pci_gen_def rtw89_pci_gen_be = {
const struct rtw89_pci_isr_def rtw89_pci_isr_be = {
.isr_rdu = B_BE_RDU_CH1_INT_V1 | B_BE_RDU_CH0_INT_V1,
.isr_halt_c2h = B_BE_HALT_C2H_INT,
.isr_wdt_timeout = B_BE_WDT_TIMEOUT_INT,
.isr_clear_rpq = {R_BE_PCIE_DMA_ISR, B_BE_PCIE_RX_RPQ0_ISR_V1},
.isr_clear_rxq = {R_BE_PCIE_DMA_ISR, B_BE_PCIE_RX_RX0P2_ISR_V1},
};
EXPORT_SYMBOL(rtw89_pci_isr_be);
const struct rtw89_pci_isr_def rtw89_pci_isr_be_v1 = {
.isr_rdu = B_BE_PCIE_RDU_CH1_INT | B_BE_PCIE_RDU_CH0_INT,
.isr_halt_c2h = B_BE_HALT_C2H_INT,
.isr_wdt_timeout = B_BE_WDT_TIMEOUT_INT,
.isr_clear_rpq = {R_BE_PCIE_DMA_ISR, B_BE_PCIE_RX_RPQ0_ISR_V1},
.isr_clear_rxq = {R_BE_PCIE_DMA_ISR, B_BE_PCIE_RX_RX0P2_ISR_V1},
};
EXPORT_SYMBOL(rtw89_pci_isr_be_v1);
const struct rtw89_pci_gen_def rtw89_pci_gen_be = {
.mac_pre_init = rtw89_pci_ops_mac_pre_init_be,
.mac_pre_deinit = rtw89_pci_ops_mac_pre_deinit_be,
.mac_post_init = rtw89_pci_ops_mac_post_init_be,

476
phy.c
View file

@ -1702,6 +1702,91 @@ void rtw89_phy_init_bb_reg(struct rtw89_dev *rtwdev)
rtw89_phy_bb_reset(rtwdev);
}
void rtw89_phy_init_bb_afe(struct rtw89_dev *rtwdev)
{
struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
const struct rtw89_fw_element_hdr *afe_elm = elm_info->afe;
const struct rtw89_phy_afe_info *info;
u32 action, cat, class;
u32 addr, mask, val;
u32 poll, rpt;
u32 n, i;
if (!afe_elm)
return;
n = le32_to_cpu(afe_elm->size) / sizeof(*info);
for (i = 0; i < n; i++) {
info = &afe_elm->u.afe.infos[i];
class = le32_to_cpu(info->class);
switch (class) {
case RTW89_FW_AFE_CLASS_P0:
case RTW89_FW_AFE_CLASS_P1:
case RTW89_FW_AFE_CLASS_CMN:
/* Currently support two paths */
break;
case RTW89_FW_AFE_CLASS_P2:
case RTW89_FW_AFE_CLASS_P3:
case RTW89_FW_AFE_CLASS_P4:
default:
rtw89_warn(rtwdev, "unexpected AFE class %u\n", class);
continue;
}
addr = le32_to_cpu(info->addr);
mask = le32_to_cpu(info->mask);
val = le32_to_cpu(info->val);
cat = le32_to_cpu(info->cat);
action = le32_to_cpu(info->action);
switch (action) {
case RTW89_FW_AFE_ACTION_WRITE:
switch (cat) {
case RTW89_FW_AFE_CAT_MAC:
case RTW89_FW_AFE_CAT_MAC1:
rtw89_write32_mask(rtwdev, addr, mask, val);
break;
case RTW89_FW_AFE_CAT_AFEDIG:
case RTW89_FW_AFE_CAT_AFEDIG1:
rtw89_write32_mask(rtwdev, addr, mask, val);
break;
case RTW89_FW_AFE_CAT_BB:
rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_0);
break;
case RTW89_FW_AFE_CAT_BB1:
rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_1);
break;
default:
rtw89_warn(rtwdev,
"unexpected AFE writing action %u\n", action);
break;
}
break;
case RTW89_FW_AFE_ACTION_POLL:
for (poll = 0; poll <= 10; poll++) {
/*
* For CAT_BB, AFE reads register with mcu_offset 0,
* so both CAT_MAC and CAT_BB use the same method.
*/
rpt = rtw89_read32_mask(rtwdev, addr, mask);
if (rpt == val)
goto poll_done;
fsleep(1);
}
rtw89_warn(rtwdev, "failed to poll AFE cat=%u addr=0x%x mask=0x%x\n",
cat, addr, mask);
poll_done:
break;
case RTW89_FW_AFE_ACTION_DELAY:
fsleep(addr);
break;
}
}
}
static u32 rtw89_phy_nctl_poll(struct rtw89_dev *rtwdev)
{
rtw89_phy_write32(rtwdev, 0x8080, 0x4);
@ -2943,7 +3028,7 @@ static void __rtw89_phy_c2h_ra_rpt_iter(struct rtw89_sta_link *rtwsta_link,
}
if (mode == RTW89_RA_RPT_MODE_LEGACY) {
valid = rtw89_ra_report_to_bitrate(rtwdev, rate, &legacy_bitrate);
valid = rtw89_legacy_rate_to_bitrate(rtwdev, rate, &legacy_bitrate);
if (!valid)
return;
}
@ -3087,6 +3172,34 @@ void (* const rtw89_phy_c2h_dm_handler[])(struct rtw89_dev *rtwdev,
[RTW89_PHY_C2H_DM_FUNC_FW_SCAN] = rtw89_phy_c2h_fw_scan_rpt,
};
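/*
 * Average the firmware-reported TX power history (quarter-dB entries)
 * in the linear domain to derive the instantaneous TX power consumed by
 * TAS tracking; an empty history maps to 0 dB.
 */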
static
void rtw89_phy_c2h_rfk_tas_pwr(struct rtw89_dev *rtwdev,
const struct rtw89_c2h_rf_tas_rpt_log *content)
{
const enum rtw89_sar_sources src = rtwdev->sar.src;
struct rtw89_tas_info *tas = &rtwdev->tas;
u64 linear = 0;
u32 i, cur_idx;
s16 txpwr;
if (!tas->enable || src == RTW89_SAR_SOURCE_NONE)
return;
cur_idx = le32_to_cpu(content->cur_idx);
for (i = 0; i < cur_idx; i++) {
txpwr = le16_to_cpu(content->txpwr_history[i]);
linear += rtw89_db_quarter_to_linear(txpwr);
rtw89_debug(rtwdev, RTW89_DBG_SAR,
"tas: index: %u, txpwr: %d\n", i, txpwr);
}
if (cur_idx == 0)
tas->instant_txpwr = rtw89_db_to_linear(0);
else
tas->instant_txpwr = DIV_ROUND_DOWN_ULL(linear, cur_idx);
}
static void rtw89_phy_c2h_rfk_rpt_log(struct rtw89_dev *rtwdev,
enum rtw89_phy_c2h_rfk_log_func func,
void *content, u16 len)
@ -3338,6 +3451,13 @@ static void rtw89_phy_c2h_rfk_rpt_log(struct rtw89_dev *rtwdev,
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt power_d[1] = %*ph\n",
(int)sizeof(txgapk->power_d[1]), txgapk->power_d[1]);
return;
case RTW89_PHY_C2H_RFK_LOG_FUNC_TAS_PWR:
if (len != sizeof(struct rtw89_c2h_rf_tas_rpt_log))
goto out;
rtw89_phy_c2h_rfk_tas_pwr(rtwdev, content);
return;
default:
break;
}
@ -3390,9 +3510,6 @@ static void rtw89_phy_c2h_rfk_log(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
u16 chunk_len;
bool handled;
if (!rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK))
return;
log_ptr += sizeof(*c2h_hdr);
len -= sizeof(*c2h_hdr);
@ -3469,6 +3586,13 @@ rtw89_phy_c2h_rfk_log_txgapk(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32
RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK, "TXGAPK");
}
static void
rtw89_phy_c2h_rfk_log_tas_pwr(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
RTW89_PHY_C2H_RFK_LOG_FUNC_TAS_PWR, "TAS");
}
static
void (* const rtw89_phy_c2h_rfk_log_handler[])(struct rtw89_dev *rtwdev,
struct sk_buff *c2h, u32 len) = {
@ -3478,6 +3602,7 @@ void (* const rtw89_phy_c2h_rfk_log_handler[])(struct rtw89_dev *rtwdev,
[RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK] = rtw89_phy_c2h_rfk_log_rxdck,
[RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI] = rtw89_phy_c2h_rfk_log_tssi,
[RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK] = rtw89_phy_c2h_rfk_log_txgapk,
[RTW89_PHY_C2H_RFK_LOG_FUNC_TAS_PWR] = rtw89_phy_c2h_rfk_log_tas_pwr,
};
static
@ -3540,39 +3665,19 @@ rtw89_phy_c2h_rfk_report_state(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u3
}
static void
rtw89_phy_c2h_rfk_log_tas_pwr(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
rtw89_phy_c2h_rfk_report_tas_pwr(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
const struct rtw89_c2h_rf_tas_info *rf_tas =
const struct rtw89_c2h_rf_tas_info *report =
(const struct rtw89_c2h_rf_tas_info *)c2h->data;
const enum rtw89_sar_sources src = rtwdev->sar.src;
struct rtw89_tas_info *tas = &rtwdev->tas;
u64 linear = 0;
u32 i, cur_idx;
s16 txpwr;
if (!tas->enable || src == RTW89_SAR_SOURCE_NONE)
return;
cur_idx = le32_to_cpu(rf_tas->cur_idx);
for (i = 0; i < cur_idx; i++) {
txpwr = (s16)le16_to_cpu(rf_tas->txpwr_history[i]);
linear += rtw89_db_quarter_to_linear(txpwr);
rtw89_debug(rtwdev, RTW89_DBG_SAR,
"tas: index: %u, txpwr: %d\n", i, txpwr);
}
if (cur_idx == 0)
tas->instant_txpwr = rtw89_db_to_linear(0);
else
tas->instant_txpwr = DIV_ROUND_DOWN_ULL(linear, cur_idx);
rtw89_phy_c2h_rfk_tas_pwr(rtwdev, &report->content);
}
static
void (* const rtw89_phy_c2h_rfk_report_handler[])(struct rtw89_dev *rtwdev,
struct sk_buff *c2h, u32 len) = {
[RTW89_PHY_C2H_RFK_REPORT_FUNC_STATE] = rtw89_phy_c2h_rfk_report_state,
[RTW89_PHY_C2H_RFK_LOG_TAS_PWR] = rtw89_phy_c2h_rfk_log_tas_pwr,
[RTW89_PHY_C2H_RFK_REPORT_FUNC_TAS_PWR] = rtw89_phy_c2h_rfk_report_tas_pwr,
};
bool rtw89_phy_c2h_chk_atomic(struct rtw89_dev *rtwdev, u8 class, u8 func)
@ -3626,12 +3731,11 @@ void rtw89_phy_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
handler = rtw89_phy_c2h_dm_handler[func];
break;
default:
rtw89_info(rtwdev, "PHY c2h class %d not support\n", class);
return;
break;
}
if (!handler) {
rtw89_info(rtwdev, "PHY c2h class %d func %d not support\n", class,
func);
rtw89_info_once(rtwdev, "PHY c2h class %d func %d not support\n",
class, func);
return;
}
handler(rtwdev, skb, len);
@ -5497,6 +5601,34 @@ static void rtw89_phy_ifs_clm_set_th_reg(struct rtw89_dev *rtwdev,
i + 1, env->ifs_clm_th_l[i], env->ifs_clm_th_h[i]);
}
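/*
 * Reset the per-BB NHM (noise histogram measurement) bookkeeping and
 * enable the NHM block together with its power-averaging method.
 */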
static void __rtw89_phy_nhm_setting_init(struct rtw89_dev *rtwdev,
struct rtw89_bb_ctx *bb)
{
const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
struct rtw89_env_monitor_info *env = &bb->env_monitor;
const struct rtw89_ccx_regs *ccx = phy->ccx;
env->nhm_include_cca = false;
env->nhm_mntr_time = 0;
env->nhm_sum = 0;
rtw89_phy_write32_idx_set(rtwdev, ccx->nhm_config, ccx->nhm_en_mask, bb->phy_idx);
rtw89_phy_write32_idx_set(rtwdev, ccx->nhm_method, ccx->nhm_pwr_method_msk,
bb->phy_idx);
}
void rtw89_phy_nhm_setting_init(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_bb_ctx *bb;
if (!chip->support_noise)
return;
rtw89_for_each_active_bb(rtwdev, bb)
__rtw89_phy_nhm_setting_init(rtwdev, bb);
}
static void rtw89_phy_ifs_clm_setting_init(struct rtw89_dev *rtwdev,
struct rtw89_bb_ctx *bb)
{
@ -5558,7 +5690,7 @@ static int rtw89_phy_ccx_racing_ctrl(struct rtw89_dev *rtwdev,
}
static void rtw89_phy_ccx_trigger(struct rtw89_dev *rtwdev,
struct rtw89_bb_ctx *bb)
struct rtw89_bb_ctx *bb, u8 sel)
{
const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
struct rtw89_env_monitor_info *env = &bb->env_monitor;
@ -5568,10 +5700,17 @@ static void rtw89_phy_ccx_trigger(struct rtw89_dev *rtwdev,
bb->phy_idx);
rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 0,
bb->phy_idx);
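/* With NHM selected, drop its enable bit before re-triggering and set
 * it again afterwards so the histogram restarts with the measurement.
 */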
if (sel & RTW89_PHY_ENV_MON_NHM)
rtw89_phy_write32_idx_clr(rtwdev, ccx->nhm_config,
ccx->nhm_en_mask, bb->phy_idx);
rtw89_phy_write32_idx(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_clm_cnt_clear_mask, 1,
bb->phy_idx);
rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 1,
bb->phy_idx);
if (sel & RTW89_PHY_ENV_MON_NHM)
rtw89_phy_write32_idx_set(rtwdev, ccx->nhm_config,
ccx->nhm_en_mask, bb->phy_idx);
env->ccx_ongoing = true;
}
@ -5642,6 +5781,125 @@ static void rtw89_phy_ifs_clm_get_utility(struct rtw89_dev *rtwdev,
env->ifs_clm_cca_avg[i]);
}
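/*
 * Collapse the NHM histogram into a single weighted-average noise
 * value: each bin is weighted by the midpoint of its threshold pair
 * (the first and last bins use a fixed offset instead), the result is
 * normalized by the total sample count, and the RTW89_NHM_TH_FACTOR
 * scaling applied when the thresholds were programmed is removed.
 * On 8852A/B/C the top bin is folded into bin 0 as part of the
 * RTW89_NHM_WA_TH handling.
 */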
static u8 rtw89_nhm_weighted_avg(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
{
struct rtw89_env_monitor_info *env = &bb->env_monitor;
u8 nhm_weight[RTW89_NHM_RPT_NUM];
u32 nhm_weighted_sum = 0;
u8 weight_zero;
u8 i;
if (env->nhm_sum == 0)
return 0;
weight_zero = clamp_t(u16, env->nhm_th[0] - RTW89_NHM_WEIGHT_OFFSET, 0, U8_MAX);
for (i = 0; i < RTW89_NHM_RPT_NUM; i++) {
if (i == 0)
nhm_weight[i] = weight_zero;
else if (i == (RTW89_NHM_RPT_NUM - 1))
nhm_weight[i] = env->nhm_th[i - 1] + RTW89_NHM_WEIGHT_OFFSET;
else
nhm_weight[i] = (env->nhm_th[i - 1] + env->nhm_th[i]) / 2;
}
if (rtwdev->chip->chip_id == RTL8852A || rtwdev->chip->chip_id == RTL8852B ||
rtwdev->chip->chip_id == RTL8852C) {
if (env->nhm_th[RTW89_NHM_TH_NUM - 1] == RTW89_NHM_WA_TH) {
nhm_weight[RTW89_NHM_RPT_NUM - 1] =
env->nhm_th[RTW89_NHM_TH_NUM - 2] +
RTW89_NHM_WEIGHT_OFFSET;
nhm_weight[RTW89_NHM_RPT_NUM - 2] =
nhm_weight[RTW89_NHM_RPT_NUM - 1];
}
env->nhm_result[0] += env->nhm_result[RTW89_NHM_RPT_NUM - 1];
env->nhm_result[RTW89_NHM_RPT_NUM - 1] = 0;
}
for (i = 0; i < RTW89_NHM_RPT_NUM; i++)
nhm_weighted_sum += env->nhm_result[i] * nhm_weight[i];
return (nhm_weighted_sum / env->nhm_sum) >> RTW89_NHM_TH_FACTOR;
}
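/*
 * Read the NHM counters once the ready bit is set, derive the weighted
 * noise power, and file it under the matching channel in the per-band
 * NHM history list so later consumers can report it per channel.
 */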
static void __rtw89_phy_nhm_get_result(struct rtw89_dev *rtwdev,
struct rtw89_bb_ctx *bb, enum rtw89_band hw_band,
u16 ch_hw_value)
{
const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
struct rtw89_env_monitor_info *env = &bb->env_monitor;
const struct rtw89_chip_info *chip = rtwdev->chip;
const struct rtw89_ccx_regs *ccx = phy->ccx;
struct ieee80211_supported_band *sband;
const struct rtw89_reg_def *nhm_rpt;
enum nl80211_band band;
u32 sum = 0;
u8 chan_idx;
u8 nhm_pwr;
u8 i;
if (!rtw89_phy_read32_idx(rtwdev, ccx->nhm, ccx->nhm_ready, bb->phy_idx)) {
rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "[NHM] Get NHM report Fail\n");
return;
}
for (i = 0; i < RTW89_NHM_RPT_NUM; i++) {
nhm_rpt = &(*chip->nhm_report)[i];
env->nhm_result[i] =
rtw89_phy_read32_idx(rtwdev, nhm_rpt->addr,
nhm_rpt->mask, bb->phy_idx);
sum += env->nhm_result[i];
}
env->nhm_sum = sum;
nhm_pwr = rtw89_nhm_weighted_avg(rtwdev, bb);
if (!ch_hw_value)
return;
band = rtw89_hw_to_nl80211_band(hw_band);
sband = rtwdev->hw->wiphy->bands[band];
if (!sband)
return;
for (chan_idx = 0; chan_idx < sband->n_channels; chan_idx++) {
struct ieee80211_channel *channel;
struct rtw89_nhm_report *rpt;
struct list_head *nhm_list;
channel = &sband->channels[chan_idx];
if (channel->hw_value != ch_hw_value)
continue;
rpt = &env->nhm_his[hw_band][chan_idx];
nhm_list = &env->nhm_rpt_list;
rpt->channel = channel;
rpt->noise = nhm_pwr;
if (list_empty(&rpt->list))
list_add_tail(&rpt->list, nhm_list);
return;
}
rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "[NHM] channel not found\n");
}
void rtw89_phy_nhm_get_result(struct rtw89_dev *rtwdev, enum rtw89_band hw_band,
u16 ch_hw_value)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_bb_ctx *bb;
if (!chip->support_noise)
return;
rtw89_for_each_active_bb(rtwdev, bb)
__rtw89_phy_nhm_get_result(rtwdev, bb, hw_band, ch_hw_value);
}
static bool rtw89_phy_ifs_clm_get_result(struct rtw89_dev *rtwdev,
struct rtw89_bb_ctx *bb)
{
@ -5742,6 +6000,107 @@ static bool rtw89_phy_ifs_clm_get_result(struct rtw89_dev *rtwdev,
return true;
}
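/*
 * Program the 802.11k-style noise histogram thresholds, scaled by
 * RTW89_NHM_TH_FACTOR; 8852A/B/C parts override the last threshold
 * with RTW89_NHM_WA_TH.
 */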
static void rtw89_phy_nhm_th_update(struct rtw89_dev *rtwdev,
struct rtw89_bb_ctx *bb)
{
struct rtw89_env_monitor_info *env = &bb->env_monitor;
static const u8 nhm_th_11k[RTW89_NHM_RPT_NUM] = {
18, 21, 24, 27, 30, 35, 40, 45, 50, 55, 60, 0
};
const struct rtw89_chip_info *chip = rtwdev->chip;
const struct rtw89_reg_def *nhm_th;
u8 i;
for (i = 0; i < RTW89_NHM_RPT_NUM; i++)
env->nhm_th[i] = nhm_th_11k[i] << RTW89_NHM_TH_FACTOR;
if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B ||
chip->chip_id == RTL8852C)
env->nhm_th[RTW89_NHM_TH_NUM - 1] = RTW89_NHM_WA_TH;
for (i = 0; i < RTW89_NHM_TH_NUM; i++) {
nhm_th = &(*chip->nhm_th)[i];
rtw89_phy_write32_idx(rtwdev, nhm_th->addr, nhm_th->mask,
env->nhm_th[i], bb->phy_idx);
}
}
static int rtw89_phy_nhm_set(struct rtw89_dev *rtwdev,
struct rtw89_bb_ctx *bb,
struct rtw89_ccx_para_info *para)
{
const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
struct rtw89_env_monitor_info *env = &bb->env_monitor;
const struct rtw89_ccx_regs *ccx = phy->ccx;
u32 unit_idx = 0;
u32 period = 0;
if (para->mntr_time == 0) {
rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
"[NHM] MNTR_TIME is 0\n");
return -EINVAL;
}
if (rtw89_phy_ccx_racing_ctrl(rtwdev, bb, para->rac_lv))
return -EINVAL;
rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
"[NHM]nhm_incld_cca=%d, mntr_time=%d ms\n",
para->nhm_incld_cca, para->mntr_time);
if (para->mntr_time != env->nhm_mntr_time) {
rtw89_phy_ccx_ms_to_period_unit(rtwdev, para->mntr_time,
&period, &unit_idx);
rtw89_phy_write32_idx(rtwdev, ccx->nhm_config,
ccx->nhm_period_mask, period, bb->phy_idx);
rtw89_phy_write32_idx(rtwdev, ccx->nhm_config,
ccx->nhm_unit_mask, unit_idx, bb->phy_idx);
env->nhm_mntr_time = para->mntr_time;
env->ccx_period = period;
env->ccx_unit_idx = unit_idx;
}
if (para->nhm_incld_cca != env->nhm_include_cca) {
rtw89_phy_write32_idx(rtwdev, ccx->nhm_config,
ccx->nhm_include_cca_mask, para->nhm_incld_cca,
bb->phy_idx);
env->nhm_include_cca = para->nhm_incld_cca;
}
rtw89_phy_nhm_th_update(rtwdev, bb);
return 0;
}
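/*
 * Kick off one NHM measurement: release any racing CCX user, program a
 * RTW89_NHM_MNTR_TIME window that includes CCA periods, and trigger the
 * CCX block with NHM selected.
 */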
static void __rtw89_phy_nhm_trigger(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
{
struct rtw89_ccx_para_info para = {
.mntr_time = RTW89_NHM_MNTR_TIME,
.rac_lv = RTW89_RAC_LV_1,
.nhm_incld_cca = true,
};
rtw89_phy_ccx_racing_release(rtwdev, bb);
rtw89_phy_nhm_set(rtwdev, bb, &para);
rtw89_phy_ccx_trigger(rtwdev, bb, RTW89_PHY_ENV_MON_NHM);
}
void rtw89_phy_nhm_trigger(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_bb_ctx *bb;
if (!chip->support_noise)
return;
rtw89_for_each_active_bb(rtwdev, bb)
__rtw89_phy_nhm_trigger(rtwdev, bb);
}
static int rtw89_phy_ifs_clm_set(struct rtw89_dev *rtwdev,
struct rtw89_bb_ctx *bb,
struct rtw89_ccx_para_info *para)
@ -5816,7 +6175,7 @@ static void __rtw89_phy_env_monitor_track(struct rtw89_dev *rtwdev,
if (rtw89_phy_ifs_clm_set(rtwdev, bb, &para) == 0)
chk_result |= RTW89_PHY_ENV_MON_IFS_CLM;
if (chk_result)
rtw89_phy_ccx_trigger(rtwdev, bb);
rtw89_phy_ccx_trigger(rtwdev, bb, chk_result);
rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
"get_result=0x%x, chk_result:0x%x\n",
@ -5930,8 +6289,6 @@ static void __rtw89_physts_parsing_init(struct rtw89_dev *rtwdev,
val |= BIT(RTW89_PHYSTS_IE13_DL_MU_DEF) |
BIT(RTW89_PHYSTS_IE01_CMN_OFDM);
} else if (i >= RTW89_CCK_PKT) {
val |= BIT(RTW89_PHYSTS_IE09_FTR_0);
val &= ~(GENMASK(RTW89_PHYSTS_IE07_CMN_EXT_PATH_D,
RTW89_PHYSTS_IE04_CMN_EXT_PATH_A));
@ -6910,6 +7267,7 @@ void rtw89_phy_dm_init(struct rtw89_dev *rtwdev)
rtw89_chip_bb_sethw(rtwdev);
rtw89_phy_env_monitor_init(rtwdev);
rtw89_phy_nhm_setting_init(rtwdev);
rtw89_physts_parsing_init(rtwdev);
rtw89_phy_dig_init(rtwdev);
rtw89_phy_cfo_init(rtwdev);
@ -6935,6 +7293,43 @@ void rtw89_phy_dm_reinit(struct rtw89_dev *rtwdev)
rtw89_physts_parsing_init(rtwdev);
}
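/*
 * For chips that report noise, allocate a per-channel NHM history entry
 * for every supported band and initialize the report list heads.
 */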
static void __rtw89_phy_dm_init_data(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
{
struct rtw89_env_monitor_info *env = &bb->env_monitor;
const struct rtw89_chip_info *chip = rtwdev->chip;
struct ieee80211_supported_band *sband;
enum rtw89_band hw_band;
enum nl80211_band band;
u8 idx;
if (!chip->support_noise)
return;
for (band = 0; band < NUM_NL80211_BANDS; band++) {
sband = rtwdev->hw->wiphy->bands[band];
if (!sband)
continue;
hw_band = rtw89_nl80211_to_hw_band(band);
env->nhm_his[hw_band] =
devm_kcalloc(rtwdev->dev, sband->n_channels,
sizeof(*env->nhm_his[0]), GFP_KERNEL);
for (idx = 0; idx < sband->n_channels; idx++)
INIT_LIST_HEAD(&env->nhm_his[hw_band][idx].list);
INIT_LIST_HEAD(&env->nhm_rpt_list);
}
}
void rtw89_phy_dm_init_data(struct rtw89_dev *rtwdev)
{
struct rtw89_bb_ctx *bb;
rtw89_for_each_capab_bb(rtwdev, bb)
__rtw89_phy_dm_init_data(rtwdev, bb);
}
void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link)
{
@ -7590,6 +7985,15 @@ static const struct rtw89_ccx_regs rtw89_ccx_regs_ax = {
.ifs_total_addr = R_IFSCNT,
.ifs_cnt_done_mask = B_IFSCNT_DONE_MSK,
.ifs_total_mask = B_IFSCNT_TOTAL_CNT_MSK,
.nhm = R_NHM_AX,
.nhm_ready = B_NHM_READY_MSK,
.nhm_config = R_NHM_CFG,
.nhm_period_mask = B_NHM_PERIOD_MSK,
.nhm_unit_mask = B_NHM_COUNTER_MSK,
.nhm_include_cca_mask = B_NHM_INCLUDE_CCA_MSK,
.nhm_en_mask = B_NHM_EN_MSK,
.nhm_method = R_NHM_TH9,
.nhm_pwr_method_msk = B_NHM_PWDB_METHOD_MSK,
};
static const struct rtw89_physts_regs rtw89_physts_regs_ax = {

24
phy.h
View file

@ -149,13 +149,14 @@ enum rtw89_phy_c2h_rfk_log_func {
RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK = 3,
RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI = 4,
RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK = 5,
RTW89_PHY_C2H_RFK_LOG_FUNC_TAS_PWR = 9,
RTW89_PHY_C2H_RFK_LOG_FUNC_NUM,
};
enum rtw89_phy_c2h_rfk_report_func {
RTW89_PHY_C2H_RFK_REPORT_FUNC_STATE = 0,
RTW89_PHY_C2H_RFK_LOG_TAS_PWR = 6,
RTW89_PHY_C2H_RFK_REPORT_FUNC_TAS_PWR = 6,
};
enum rtw89_phy_c2h_dm_func {
@ -188,6 +189,12 @@ enum rtw89_env_monitor_result_level {
RTW89_PHY_ENV_MON_EDCCA_CLM = BIT(4),
};
#define RTW89_NHM_WEIGHT_OFFSET 2
#define RTW89_NHM_WA_TH (109 << 1)
#define RTW89_NOISE_DEFAULT -96
#define RTW89_NHM_MNTR_TIME 40
#define RTW89_NHM_TH_FACTOR 1
#define CCX_US_BASE_RATIO 4
enum rtw89_ccx_unit {
RTW89_CCX_4_US = 0,
@ -428,6 +435,15 @@ struct rtw89_ccx_regs {
u32 ifs_total_addr;
u32 ifs_cnt_done_mask;
u32 ifs_total_mask;
u32 nhm;
u32 nhm_ready;
u32 nhm_config;
u32 nhm_period_mask;
u32 nhm_unit_mask;
u32 nhm_include_cca_mask;
u32 nhm_en_mask;
u32 nhm_method;
u32 nhm_pwr_method_msk;
};
struct rtw89_physts_regs {
@ -814,6 +830,7 @@ bool rtw89_phy_write_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
bool rtw89_phy_write_rf_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask, u32 data);
void rtw89_phy_init_bb_reg(struct rtw89_dev *rtwdev);
void rtw89_phy_init_bb_afe(struct rtw89_dev *rtwdev);
void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev, bool noio);
void rtw89_phy_config_rf_reg_v1(struct rtw89_dev *rtwdev,
const struct rtw89_reg2_def *reg,
@ -821,6 +838,7 @@ void rtw89_phy_config_rf_reg_v1(struct rtw89_dev *rtwdev,
void *extra_data);
void rtw89_phy_dm_init(struct rtw89_dev *rtwdev);
void rtw89_phy_dm_reinit(struct rtw89_dev *rtwdev);
void rtw89_phy_dm_init_data(struct rtw89_dev *rtwdev);
void rtw89_phy_write32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
u32 data, enum rtw89_phy_idx phy_idx);
void rtw89_phy_write32_idx_set(struct rtw89_dev *rtwdev, u32 addr, u32 bits,
@ -1038,5 +1056,9 @@ enum rtw89_rf_path rtw89_phy_get_syn_sel(struct rtw89_dev *rtwdev,
u8 rtw89_rfk_chan_lookup(struct rtw89_dev *rtwdev,
const struct rtw89_rfk_chan_desc *desc, u8 desc_nr,
const struct rtw89_chan *target_chan);
void rtw89_phy_nhm_setting_init(struct rtw89_dev *rtwdev);
void rtw89_phy_nhm_get_result(struct rtw89_dev *rtwdev, enum rtw89_band hw_band,
u16 ch_hw_value);
void rtw89_phy_nhm_trigger(struct rtw89_dev *rtwdev);
#endif

View file

@ -63,6 +63,15 @@ static const struct rtw89_ccx_regs rtw89_ccx_regs_be = {
.ifs_total_addr = R_IFSCNT_V1,
.ifs_cnt_done_mask = B_IFSCNT_DONE_MSK,
.ifs_total_mask = B_IFSCNT_TOTAL_CNT_MSK,
.nhm = R_NHM_BE,
.nhm_ready = B_NHM_READY_BE_MSK,
.nhm_config = R_NHM_CFG,
.nhm_period_mask = B_NHM_PERIOD_MSK,
.nhm_unit_mask = B_NHM_COUNTER_MSK,
.nhm_include_cca_mask = B_NHM_INCLUDE_CCA_MSK,
.nhm_en_mask = B_NHM_EN_MSK,
.nhm_method = R_NHM_TH9,
.nhm_pwr_method_msk = B_NHM_PWDB_METHOD_MSK,
};
static const struct rtw89_physts_regs rtw89_physts_regs_be = {

3
ps.c
View file

@ -119,6 +119,9 @@ static void __rtw89_enter_lps_link(struct rtw89_dev *rtwdev,
rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_FW_CTRL);
rtw89_fw_h2c_lps_parm(rtwdev, &lps_param);
if (RTW89_CHK_FW_FEATURE(BEACON_TRACKING, &rtwdev->fw))
rtw89_fw_h2c_pwr_lvl(rtwdev, rtwvif_link);
}
static void __rtw89_leave_lps(struct rtw89_dev *rtwdev,

56
reg.h
View file

@ -3370,6 +3370,10 @@
#define B_AX_CSIPRT_HESU_AID_EN BIT(25)
#define B_AX_CSIPRT_VHTSU_AID_EN BIT(24)
#define R_AX_BCN_PSR_RPT_P0 0xCE84
#define R_AX_BCN_PSR_RPT_P0_C1 0xEE84
#define B_AX_BCAID_P0_MASK GENMASK(10, 0)
#define R_AX_RX_STATE_MONITOR 0xCEF0
#define R_AX_RX_STATE_MONITOR_C1 0xEEF0
#define B_AX_RX_STATE_MONITOR_MASK GENMASK(31, 0)
@ -6258,6 +6262,11 @@
#define B_BE_PTCL_TOP_ERR_IND BIT(1)
#define B_BE_SCHEDULE_TOP_ERR_IND BIT(0)
#define R_BE_CMAC_FW_TRIGGER_IDCT_ISR 0x10168
#define R_BE_CMAC_FW_TRIGGER_IDCT_ISR_C1 0x14168
#define B_BE_CMAC_FW_ERR_IDCT_IMR BIT(31)
#define B_BE_CMAC_FW_TRIG_IDCT BIT(0)
#define R_BE_SER_L0_DBG_CNT 0x10170
#define R_BE_SER_L0_DBG_CNT_C1 0x14170
#define B_BE_SER_L0_PHYINTF_CNT_MASK GENMASK(31, 24)
@ -7494,6 +7503,10 @@
#define R_BE_DRV_INFO_OPTION_C1 0x15470
#define B_BE_DRV_INFO_PHYRPT_EN BIT(0)
#define R_BE_BCN_PSR_RPT_P0 0x11484
#define R_BE_BCN_PSR_RPT_P0_C1 0x15484
#define B_BE_BCAID_P0_MASK GENMASK(10, 0)
#define R_BE_RX_ERR_ISR 0x114F4
#define R_BE_RX_ERR_ISR_C1 0x154F4
#define B_BE_RX_ERR_TRIG_ACT_TO BIT(9)
@ -8092,6 +8105,26 @@
#define B_MEASUREMENT_TRIG_MSK BIT(2)
#define B_CCX_TRIG_OPT_MSK BIT(1)
#define B_CCX_EN_MSK BIT(0)
#define R_NHM_CFG 0x0C08
#define B_NHM_PERIOD_MSK GENMASK(15, 0)
#define B_NHM_COUNTER_MSK GENMASK(17, 16)
#define B_NHM_EN_MSK BIT(18)
#define B_NHM_INCLUDE_CCA_MSK BIT(19)
#define B_NHM_TH0_MSK GENMASK(31, 24)
#define R_NHM_TH1 0x0C0C
#define B_NHM_TH1_MSK GENMASK(7, 0)
#define B_NHM_TH2_MSK GENMASK(15, 8)
#define B_NHM_TH3_MSK GENMASK(23, 16)
#define B_NHM_TH4_MSK GENMASK(31, 24)
#define R_NHM_TH5 0x0C10
#define B_NHM_TH5_MSK GENMASK(7, 0)
#define B_NHM_TH6_MSK GENMASK(15, 8)
#define B_NHM_TH7_MSK GENMASK(23, 16)
#define B_NHM_TH8_MSK GENMASK(31, 24)
#define R_NHM_TH9 0x0C14
#define B_NHM_TH9_MSK GENMASK(7, 0)
#define B_NHM_TH10_MSK GENMASK(15, 8)
#define B_NHM_PWDB_METHOD_MSK GENMASK(17, 16)
#define R_FAHM 0x0C1C
#define B_RXTD_CKEN BIT(2)
#define R_IFS_COUNTER 0x0C28
@ -8161,6 +8194,8 @@
#define R_BRK_ASYNC_RST_EN_1 0x0DC0
#define R_BRK_ASYNC_RST_EN_2 0x0DC4
#define R_BRK_ASYNC_RST_EN_3 0x0DC8
#define R_NHM_BE 0x0EA4
#define B_NHM_READY_BE_MSK BIT(16)
#define R_CTLTOP 0x1008
#define B_CTLTOP_ON BIT(23)
#define B_CTLTOP_VAL GENMASK(15, 12)
@ -8216,6 +8251,26 @@
#define B_SWSI_R_BUSY_V1 BIT(25)
#define B_SWSI_R_DATA_DONE_V1 BIT(26)
#define R_TX_COUNTER 0x1A40
#define R_NHM_CNT0 0x1A88
#define B_NHM_CNT0_MSK GENMASK(15, 0)
#define B_NHM_CNT1_MSK GENMASK(31, 16)
#define R_NHM_CNT2 0x1A8C
#define B_NHM_CNT2_MSK GENMASK(15, 0)
#define B_NHM_CNT3_MSK GENMASK(31, 16)
#define R_NHM_CNT4 0x1A90
#define B_NHM_CNT4_MSK GENMASK(15, 0)
#define B_NHM_CNT5_MSK GENMASK(31, 16)
#define R_NHM_CNT6 0x1A94
#define B_NHM_CNT6_MSK GENMASK(15, 0)
#define B_NHM_CNT7_MSK GENMASK(31, 16)
#define R_NHM_CNT8 0x1A98
#define B_NHM_CNT8_MSK GENMASK(15, 0)
#define B_NHM_CNT9_MSK GENMASK(31, 16)
#define R_NHM_CNT10 0x1A9C
#define B_NHM_CNT10_MSK GENMASK(15, 0)
#define B_NHM_CNT11_MSK GENMASK(31, 16)
#define R_NHM_AX 0x1AA4
#define B_NHM_READY_MSK BIT(16)
#define R_IFS_CLM_TX_CNT 0x1ACC
#define R_IFS_CLM_TX_CNT_V1 0x0ECC
#define B_IFS_CLM_EDCCA_EXCLUDE_CCA_FA_MSK GENMASK(31, 16)
@ -9126,6 +9181,7 @@
#define B_COEF_SEL_MDPD BIT(8)
#define B_COEF_SEL_MDPD_V1 GENMASK(9, 8)
#define B_COEF_SEL_EN BIT(31)
#define R_CFIR_COEF 0x810c
#define R_CFIR_SYS 0x8120
#define R_IQK_RES 0x8124
#define B_IQK_RES_K BIT(28)

View file

@ -2537,6 +2537,7 @@ static const struct rtw89_chip_ops rtw8851b_chip_ops = {
.query_rxdesc = rtw89_core_query_rxdesc,
.fill_txdesc = rtw89_core_fill_txdesc,
.fill_txdesc_fwcmd = rtw89_core_fill_txdesc,
.get_ch_dma = rtw89_core_get_ch_dma,
.cfg_ctrl_path = rtw89_mac_cfg_ctrl_path,
.mac_cfg_gnt = rtw89_mac_cfg_gnt,
.stop_sch_tx = rtw89_mac_stop_sch_tx,
@ -2628,6 +2629,7 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
.support_ant_gain = false,
.support_tas = false,
.support_sar_by_ant = false,
.support_noise = false,
.ul_tb_waveform_ctrl = true,
.ul_tb_pwr_diff = false,
.rx_freq_frome_ie = true,
@ -2689,6 +2691,8 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
.cfo_hw_comp = true,
.dcfo_comp = &rtw8851b_dcfo_comp,
.dcfo_comp_sft = 12,
.nhm_report = NULL,
.nhm_th = NULL,
.imr_info = &rtw8851b_imr_info,
.imr_dmac_table = NULL,
.imr_cmac_table = NULL,

View file

@ -17,8 +17,9 @@
#define DPK_RF_REG_NUM_8851B 4
#define DPK_KSET_NUM 4
#define RTW8851B_RXK_GROUP_NR 4
#define RTW8851B_RXK_GROUP_IDX_NR 2
#define RTW8851B_TXK_GROUP_NR 1
#define RTW8851B_RXK_GROUP_IDX_NR 4
#define RTW8851B_A_TXK_GROUP_NR 2
#define RTW8851B_G_TXK_GROUP_NR 1
#define RTW8851B_IQK_VER 0x14
#define RTW8851B_IQK_SS 1
#define RTW8851B_LOK_GRAM 10
@ -114,19 +115,21 @@ static const u32 _tssi_de_mcs_10m[RF_PATH_NUM_8851B] = {0x5830};
static const u32 g_idxrxgain[RTW8851B_RXK_GROUP_NR] = {0x10e, 0x116, 0x28e, 0x296};
static const u32 g_idxattc2[RTW8851B_RXK_GROUP_NR] = {0x0, 0xf, 0x0, 0xf};
static const u32 g_idxrxagc[RTW8851B_RXK_GROUP_NR] = {0x0, 0x1, 0x2, 0x3};
static const u32 a_idxrxgain[RTW8851B_RXK_GROUP_IDX_NR] = {0x10C, 0x28c};
static const u32 a_idxattc2[RTW8851B_RXK_GROUP_IDX_NR] = {0xf, 0xf};
static const u32 a_idxrxagc[RTW8851B_RXK_GROUP_IDX_NR] = {0x4, 0x6};
static const u32 a_power_range[RTW8851B_TXK_GROUP_NR] = {0x0};
static const u32 a_track_range[RTW8851B_TXK_GROUP_NR] = {0x6};
static const u32 a_gain_bb[RTW8851B_TXK_GROUP_NR] = {0x0a};
static const u32 a_itqt[RTW8851B_TXK_GROUP_NR] = {0x12};
static const u32 g_power_range[RTW8851B_TXK_GROUP_NR] = {0x0};
static const u32 g_track_range[RTW8851B_TXK_GROUP_NR] = {0x6};
static const u32 g_gain_bb[RTW8851B_TXK_GROUP_NR] = {0x10};
static const u32 g_itqt[RTW8851B_TXK_GROUP_NR] = {0x12};
static const u32 a_idxrxgain[RTW8851B_RXK_GROUP_IDX_NR] = {0x10C, 0x112, 0x28c, 0x292};
static const u32 a_idxattc2[RTW8851B_RXK_GROUP_IDX_NR] = {0xf, 0xf, 0xf, 0xf};
static const u32 a_idxrxagc[RTW8851B_RXK_GROUP_IDX_NR] = {0x4, 0x5, 0x6, 0x7};
static const u32 a_power_range[RTW8851B_A_TXK_GROUP_NR] = {0x0, 0x0};
static const u32 a_track_range[RTW8851B_A_TXK_GROUP_NR] = {0x7, 0x7};
static const u32 a_gain_bb[RTW8851B_A_TXK_GROUP_NR] = {0x08, 0x0d};
static const u32 a_itqt[RTW8851B_A_TXK_GROUP_NR] = {0x12, 0x12};
static const u32 a_att_smxr[RTW8851B_A_TXK_GROUP_NR] = {0x0, 0x2};
static const u32 g_power_range[RTW8851B_G_TXK_GROUP_NR] = {0x0};
static const u32 g_track_range[RTW8851B_G_TXK_GROUP_NR] = {0x6};
static const u32 g_gain_bb[RTW8851B_G_TXK_GROUP_NR] = {0x10};
static const u32 g_itqt[RTW8851B_G_TXK_GROUP_NR] = {0x12};
static const u32 rtw8851b_backup_bb_regs[] = {0xc0d4, 0xc0d8, 0xc0c4, 0xc0ec, 0xc0e8};
static const u32 rtw8851b_backup_bb_regs[] = {
0xc0d4, 0xc0d8, 0xc0c4, 0xc0ec, 0xc0e8, 0x12a0, 0xc0f0};
static const u32 rtw8851b_backup_rf_regs[] = {
0xef, 0xde, 0x0, 0x1e, 0x2, 0x85, 0x90, 0x5};
@ -139,17 +142,6 @@ static const u32 dpk_rf_reg[DPK_RF_REG_NUM_8851B] = {0xde, 0x8f, 0x5, 0x10005};
static void _set_ch(struct rtw89_dev *rtwdev, u32 val);
static u8 _rxk_5ghz_group_from_idx(u8 idx)
{
/* There are four RXK groups (RTW8851B_RXK_GROUP_NR), but only group 0
* and 2 are used in 5 GHz band, so reduce elements to 2.
*/
if (idx < RTW8851B_RXK_GROUP_IDX_NR)
return idx * 2;
return 0;
}
static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
return RF_A;
@ -196,7 +188,7 @@ static void _txck_force(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
static void _rxck_force(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
bool force, enum adc_ck ck)
{
static const u32 ck960_8851b[] = {0x8, 0x2, 0x2, 0x4, 0xf, 0xa, 0x93};
static const u32 ck960_8851b[] = {0x8, 0x2, 0x2, 0x4, 0xf, 0xa, 0x92};
static const u32 ck1920_8851b[] = {0x9, 0x0, 0x0, 0x3, 0xf, 0xa, 0x49};
const u32 *data;
@ -800,7 +792,7 @@ static bool _iqk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
"[IQK]============ S%d ID_NBTXK ============\n", path);
rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x0);
rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT,
0x00b);
0x11);
iqk_cmd = 0x408 | (1 << (4 + path));
break;
case ID_NBRXK:
@ -818,7 +810,7 @@ static bool _iqk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, iqk_cmd + 1);
notready = _iqk_check_cal(rtwdev, path);
if (iqk_info->iqk_sram_en &&
(ktype == ID_NBRXK || ktype == ID_RXK))
(ktype == ID_NBRXK || ktype == ID_RXK || ktype == ID_NBTXK))
_iqk_sram(rtwdev, path);
rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x0);
@ -905,18 +897,27 @@ static bool _rxk_5g_group_sel(struct rtw89_dev *rtwdev,
bool kfail = false;
bool notready;
u32 rf_0;
u8 idx;
u32 val;
u8 gp;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
for (idx = 0; idx < RTW8851B_RXK_GROUP_IDX_NR; idx++) {
gp = _rxk_5ghz_group_from_idx(idx);
rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x1000);
rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x4);
rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x17);
rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x5);
rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x27);
rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x0);
val = rtw89_read_rf(rtwdev, RF_PATH_A, RR_RXA2, 0x20);
rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RR_MOD_MASK, 0xc);
for (gp = 0; gp < RTW8851B_RXK_GROUP_IDX_NR; gp++) {
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, gp = %x\n", path, gp);
rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RR_MOD_RGM, a_idxrxgain[idx]);
rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, RR_RXA2_ATT, a_idxattc2[idx]);
rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RR_MOD_RGM, a_idxrxgain[gp]);
rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, RR_RXA2_ATT, a_idxattc2[gp]);
rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, 0x20, 0x1);
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1);
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x0);
@ -926,7 +927,7 @@ static bool _rxk_5g_group_sel(struct rtw89_dev *rtwdev,
fsleep(100);
rf_0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, rf_0);
rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, a_idxrxagc[idx]);
rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, a_idxrxagc[gp]);
rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11);
notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXAGC);
@ -959,6 +960,7 @@ static bool _rxk_5g_group_sel(struct rtw89_dev *rtwdev,
_iqk_sram(rtwdev, path);
if (kfail) {
rtw89_phy_write32_mask(rtwdev, R_IQK_RES, B_IQK_RES_RXCFIR, 0x0);
rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD,
iqk_info->nb_rxcfir[path] | 0x2);
iqk_info->is_wb_txiqk[path] = false;
@ -968,6 +970,14 @@ static bool _rxk_5g_group_sel(struct rtw89_dev *rtwdev,
iqk_info->is_wb_txiqk[path] = true;
}
rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, 0x20, val);
rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x1000);
rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x4);
rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x37);
rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x5);
rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x27);
rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x0);
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[IQK]S%x, kfail = 0x%x, 0x8%x3c = 0x%x\n", path, kfail,
1 << path, iqk_info->nb_rxcfir[path]);
@ -980,17 +990,26 @@ static bool _iqk_5g_nbrxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
bool kfail = false;
bool notready;
u8 idx = 0x1;
u8 gp = 2;
u32 rf_0;
u8 gp;
gp = _rxk_5ghz_group_from_idx(idx);
u32 val;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, gp = %x\n", path, gp);
rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RR_MOD_RGM, a_idxrxgain[idx]);
rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, RR_RXA2_ATT, a_idxattc2[idx]);
rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x1000);
rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x4);
rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x17);
rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x5);
rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x27);
rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x0);
val = rtw89_read_rf(rtwdev, RF_PATH_A, RR_RXA2, 0x20);
rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RR_MOD_MASK, 0xc);
rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RR_MOD_RGM, a_idxrxgain[gp]);
rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, RR_RXA2_ATT, a_idxattc2[gp]);
rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, 0x20, 0x1);
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1);
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x0);
@ -1000,7 +1019,7 @@ static bool _iqk_5g_nbrxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
fsleep(100);
rf_0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, rf_0);
rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, a_idxrxagc[idx]);
rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, a_idxrxagc[gp]);
rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11);
notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXAGC);
@ -1026,6 +1045,7 @@ static bool _iqk_5g_nbrxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);
if (kfail) {
rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8), 0xf, 0x0);
rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
MASKDWORD, 0x40000002);
iqk_info->is_wb_rxiqk[path] = false;
@ -1033,6 +1053,14 @@ static bool _iqk_5g_nbrxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
iqk_info->is_wb_rxiqk[path] = false;
}
rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, 0x20, val);
rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x1000);
rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x4);
rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x37);
rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x5);
rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x27);
rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x0);
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[IQK]S%x, kfail = 0x%x, 0x8%x3c = 0x%x\n", path, kfail,
1 << path, iqk_info->nb_rxcfir[path]);
@ -1149,6 +1177,7 @@ static void _iqk_rxclk_setting(struct rtw89_dev *rtwdev, u8 path)
static bool _txk_5g_group_sel(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx, u8 path)
{
static const u8 a_idx[RTW8851B_A_TXK_GROUP_NR] = {2, 3};
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
bool kfail = false;
bool notready;
@ -1156,16 +1185,20 @@ static bool _txk_5g_group_sel(struct rtw89_dev *rtwdev,
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
for (gp = 0x0; gp < RTW8851B_TXK_GROUP_NR; gp++) {
rtw89_phy_write32_mask(rtwdev, R_CFIR_COEF, MASKDWORD, 0x33332222);
for (gp = 0x0; gp < RTW8851B_A_TXK_GROUP_NR; gp++) {
rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, a_power_range[gp]);
rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, a_track_range[gp]);
rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, a_gain_bb[gp]);
rtw89_write_rf(rtwdev, path, RR_BIASA, RR_BIASA_A, a_att_smxr[gp]);
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1);
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x1);
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G2, 0x0);
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP, gp);
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP, a_idx[gp]);
rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x11);
rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, MASKDWORD, a_itqt[gp]);
notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);
@ -1206,7 +1239,9 @@ static bool _txk_2g_group_sel(struct rtw89_dev *rtwdev,
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
for (gp = 0x0; gp < RTW8851B_TXK_GROUP_NR; gp++) {
rtw89_phy_write32_mask(rtwdev, R_CFIR_COEF, MASKDWORD, 0x0);
for (gp = 0x0; gp < RTW8851B_G_TXK_GROUP_NR; gp++) {
rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, g_power_range[gp]);
rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, g_track_range[gp]);
rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, g_gain_bb[gp]);
@ -1249,29 +1284,29 @@ static bool _txk_2g_group_sel(struct rtw89_dev *rtwdev,
static bool _iqk_5g_nbtxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
u8 path)
{
static const u8 a_idx[RTW8851B_A_TXK_GROUP_NR] = {2, 3};
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
bool kfail = false;
bool notready;
u8 gp;
u8 gp = 0;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
for (gp = 0x0; gp < RTW8851B_TXK_GROUP_NR; gp++) {
rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, a_power_range[gp]);
rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, a_track_range[gp]);
rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, a_gain_bb[gp]);
rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, a_power_range[gp]);
rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, a_track_range[gp]);
rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, a_gain_bb[gp]);
rtw89_write_rf(rtwdev, path, RR_BIASA, RR_BIASA_A, a_att_smxr[gp]);
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1);
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x1);
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G2, 0x0);
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP, gp);
rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, MASKDWORD, a_itqt[gp]);
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1);
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x1);
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G2, 0x0);
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP, a_idx[gp]);
rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, MASKDWORD, a_itqt[gp]);
notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);
iqk_info->nb_txcfir[path] =
rtw89_phy_read32_mask(rtwdev, R_TXIQC, MASKDWORD) | 0x2;
}
notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);
iqk_info->nb_txcfir[path] =
rtw89_phy_read32_mask(rtwdev, R_TXIQC, MASKDWORD) | 0x2;
if (!notready)
kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);
@ -1300,7 +1335,7 @@ static bool _iqk_2g_nbtxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
for (gp = 0x0; gp < RTW8851B_TXK_GROUP_NR; gp++) {
for (gp = 0x0; gp < RTW8851B_G_TXK_GROUP_NR; gp++) {
rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, g_power_range[gp]);
rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, g_track_range[gp]);
rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, g_gain_bb[gp]);
@ -1664,8 +1699,6 @@ static void _iqk_init(struct rtw89_dev *rtwdev)
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u8 idx, path;
rtw89_phy_write32_mask(rtwdev, R_IQKINF, MASKDWORD, 0x0);
if (iqk_info->is_iqk_init)
return;

View file

@ -11,6 +11,7 @@
static const struct rtw89_pci_info rtw8851b_pci_info = {
.gen_def = &rtw89_pci_gen_ax,
.isr_def = &rtw89_pci_isr_ax,
.txbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_mode = MAC_AX_RXBD_PKT,
@ -28,6 +29,8 @@ static const struct rtw89_pci_info rtw8851b_pci_info = {
.rx_ring_eq_is_full = false,
.check_rx_tag = false,
.no_rxbd_fs = false,
.group_bd_addr = false,
.rpp_fmt_size = sizeof(struct rtw89_pci_rpp_fmt),
.init_cfg_reg = R_AX_PCIE_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN,
@ -57,6 +60,7 @@ static const struct rtw89_pci_info rtw8851b_pci_info = {
.ltr_set = rtw89_pci_ltr_set,
.fill_txaddr_info = rtw89_pci_fill_txaddr_info,
.parse_rpp = rtw89_pci_parse_rpp,
.config_intr_mask = rtw89_pci_config_intr_mask,
.enable_intr = rtw89_pci_enable_intr,
.disable_intr = rtw89_pci_disable_intr,

View file

@ -16,6 +16,9 @@ static const struct rtw89_driver_info rtw89_8851bu_info = {
static const struct usb_device_id rtw_8851bu_id_table[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x0bda, 0xb851, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&rtw89_8851bu_info },
/* D-Link AX9U rev. A1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x332a, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&rtw89_8851bu_info },
/* TP-Link Archer TX10UB Nano */
{ USB_DEVICE_AND_INTERFACE_INFO(0x3625, 0x010b, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&rtw89_8851bu_info },

View file

@ -426,6 +426,35 @@ static const struct rtw89_reg_def rtw8852a_dcfo_comp = {
R_DCFO_COMP_S0, B_DCFO_COMP_S0_MSK
};
static const struct rtw89_reg_def rtw8852a_nhm_th[RTW89_NHM_TH_NUM] = {
{R_NHM_CFG, B_NHM_TH0_MSK},
{R_NHM_TH1, B_NHM_TH1_MSK},
{R_NHM_TH1, B_NHM_TH2_MSK},
{R_NHM_TH1, B_NHM_TH3_MSK},
{R_NHM_TH1, B_NHM_TH4_MSK},
{R_NHM_TH5, B_NHM_TH5_MSK},
{R_NHM_TH5, B_NHM_TH6_MSK},
{R_NHM_TH5, B_NHM_TH7_MSK},
{R_NHM_TH5, B_NHM_TH8_MSK},
{R_NHM_TH9, B_NHM_TH9_MSK},
{R_NHM_TH9, B_NHM_TH10_MSK},
};
static const struct rtw89_reg_def rtw8852a_nhm_rpt[RTW89_NHM_RPT_NUM] = {
{R_NHM_CNT0, B_NHM_CNT0_MSK},
{R_NHM_CNT0, B_NHM_CNT1_MSK},
{R_NHM_CNT2, B_NHM_CNT2_MSK},
{R_NHM_CNT2, B_NHM_CNT3_MSK},
{R_NHM_CNT4, B_NHM_CNT4_MSK},
{R_NHM_CNT4, B_NHM_CNT5_MSK},
{R_NHM_CNT6, B_NHM_CNT6_MSK},
{R_NHM_CNT6, B_NHM_CNT7_MSK},
{R_NHM_CNT8, B_NHM_CNT8_MSK},
{R_NHM_CNT8, B_NHM_CNT9_MSK},
{R_NHM_CNT10, B_NHM_CNT10_MSK},
{R_NHM_CNT10, B_NHM_CNT11_MSK},
};
static const struct rtw89_imr_info rtw8852a_imr_info = {
.wdrls_imr_set = B_AX_WDRLS_IMR_SET,
.wsec_imr_reg = R_AX_SEC_DEBUG,
@ -2080,10 +2109,17 @@ static void rtw8852a_query_ppdu(struct rtw89_dev *rtwdev,
{
u8 path;
u8 *rx_power = phy_ppdu->rssi;
u8 raw;
if (!status->signal) {
if (phy_ppdu->to_self)
raw = ewma_rssi_read(&rtwdev->phystat.bcn_rssi);
else
raw = max(rx_power[RF_PATH_A], rx_power[RF_PATH_B]);
status->signal = RTW89_RSSI_RAW_TO_DBM(raw);
}
if (!status->signal)
status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A],
rx_power[RF_PATH_B]));
for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
status->chains |= BIT(path);
status->chain_signal[path] = RTW89_RSSI_RAW_TO_DBM(rx_power[path]);
@ -2142,6 +2178,7 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = {
.query_rxdesc = rtw89_core_query_rxdesc,
.fill_txdesc = rtw89_core_fill_txdesc,
.fill_txdesc_fwcmd = rtw89_core_fill_txdesc,
.get_ch_dma = rtw89_core_get_ch_dma,
.cfg_ctrl_path = rtw89_mac_cfg_ctrl_path,
.mac_cfg_gnt = rtw89_mac_cfg_gnt,
.stop_sch_tx = rtw89_mac_stop_sch_tx,
@ -2220,6 +2257,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.support_ant_gain = false,
.support_tas = false,
.support_sar_by_ant = false,
.support_noise = true,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = false,
.rx_freq_frome_ie = true,
@ -2282,6 +2320,8 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.cfo_hw_comp = false,
.dcfo_comp = &rtw8852a_dcfo_comp,
.dcfo_comp_sft = 10,
.nhm_report = &rtw8852a_nhm_rpt,
.nhm_th = &rtw8852a_nhm_th,
.imr_info = &rtw8852a_imr_info,
.imr_dmac_table = NULL,
.imr_cmac_table = NULL,

View file

@ -11,6 +11,7 @@
static const struct rtw89_pci_info rtw8852a_pci_info = {
.gen_def = &rtw89_pci_gen_ax,
.isr_def = &rtw89_pci_isr_ax,
.txbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_mode = MAC_AX_RXBD_PKT,
@ -28,6 +29,8 @@ static const struct rtw89_pci_info rtw8852a_pci_info = {
.rx_ring_eq_is_full = false,
.check_rx_tag = false,
.no_rxbd_fs = false,
.group_bd_addr = false,
.rpp_fmt_size = sizeof(struct rtw89_pci_rpp_fmt),
.init_cfg_reg = R_AX_PCIE_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN,
@ -55,6 +58,7 @@ static const struct rtw89_pci_info rtw8852a_pci_info = {
.ltr_set = rtw89_pci_ltr_set,
.fill_txaddr_info = rtw89_pci_fill_txaddr_info,
.parse_rpp = rtw89_pci_parse_rpp,
.config_intr_mask = rtw89_pci_config_intr_mask,
.enable_intr = rtw89_pci_enable_intr,
.disable_intr = rtw89_pci_disable_intr,

View file

@ -842,6 +842,7 @@ static const struct rtw89_chip_ops rtw8852b_chip_ops = {
.query_rxdesc = rtw89_core_query_rxdesc,
.fill_txdesc = rtw89_core_fill_txdesc,
.fill_txdesc_fwcmd = rtw89_core_fill_txdesc,
.get_ch_dma = rtw89_core_get_ch_dma,
.cfg_ctrl_path = rtw89_mac_cfg_ctrl_path,
.mac_cfg_gnt = rtw89_mac_cfg_gnt,
.stop_sch_tx = rtw89_mac_stop_sch_tx,
@ -939,6 +940,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.support_ant_gain = true,
.support_tas = false,
.support_sar_by_ant = true,
.support_noise = false,
.ul_tb_waveform_ctrl = true,
.ul_tb_pwr_diff = false,
.rx_freq_frome_ie = true,
@ -1001,6 +1003,8 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.cfo_hw_comp = true,
.dcfo_comp = &rtw8852b_dcfo_comp,
.dcfo_comp_sft = 10,
.nhm_report = NULL,
.nhm_th = NULL,
.imr_info = &rtw8852b_imr_info,
.imr_dmac_table = NULL,
.imr_cmac_table = NULL,

View file

@ -11,6 +11,7 @@
static const struct rtw89_pci_info rtw8852b_pci_info = {
.gen_def = &rtw89_pci_gen_ax,
.isr_def = &rtw89_pci_isr_ax,
.txbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_mode = MAC_AX_RXBD_PKT,
@ -28,6 +29,8 @@ static const struct rtw89_pci_info rtw8852b_pci_info = {
.rx_ring_eq_is_full = false,
.check_rx_tag = false,
.no_rxbd_fs = false,
.group_bd_addr = false,
.rpp_fmt_size = sizeof(struct rtw89_pci_rpp_fmt),
.init_cfg_reg = R_AX_PCIE_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN,
@ -57,6 +60,7 @@ static const struct rtw89_pci_info rtw8852b_pci_info = {
.ltr_set = rtw89_pci_ltr_set,
.fill_txaddr_info = rtw89_pci_fill_txaddr_info,
.parse_rpp = rtw89_pci_parse_rpp,
.config_intr_mask = rtw89_pci_config_intr_mask,
.enable_intr = rtw89_pci_enable_intr,
.disable_intr = rtw89_pci_disable_intr,

View file

@ -708,6 +708,7 @@ static const struct rtw89_chip_ops rtw8852bt_chip_ops = {
.query_rxdesc = rtw89_core_query_rxdesc,
.fill_txdesc = rtw89_core_fill_txdesc,
.fill_txdesc_fwcmd = rtw89_core_fill_txdesc,
.get_ch_dma = rtw89_core_get_ch_dma,
.cfg_ctrl_path = rtw89_mac_cfg_ctrl_path,
.mac_cfg_gnt = rtw89_mac_cfg_gnt,
.stop_sch_tx = rtw89_mac_stop_sch_tx,

View file

@ -1799,22 +1799,14 @@ static void _dpk_onoff(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool o
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u8 val, kidx = dpk->cur_idx[path];
bool off_reverse;
val = dpk->is_dpk_enable && !off && dpk->bp[path][kidx].path_ok;
if (off)
off_reverse = false;
else
off_reverse = true;
val = dpk->is_dpk_enable & off_reverse & dpk->bp[path][kidx].path_ok;
rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
BIT(24), val);
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s !!!\n", path,
kidx, str_enable_disable(dpk->is_dpk_enable & off_reverse));
kidx, str_enable_disable(dpk->is_dpk_enable && !off));
}
static void _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
@ -1883,8 +1875,8 @@ static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n",
path, dpk->cur_idx[path], phy,
rtwdev->is_tssi_mode[path] ? "on" : "off",
rtwdev->dbcc_en ? "on" : "off",
str_on_off(rtwdev->is_tssi_mode[path]),
str_on_off(rtwdev->dbcc_en),
dpk->bp[path][kidx].band == 0 ? "2G" :
dpk->bp[path][kidx].band == 1 ? "5G" : "6G",
dpk->bp[path][kidx].ch,

View file

@ -17,6 +17,7 @@ static const struct rtw89_pci_ssid_quirk rtw8852bt_pci_ssid_quirks[] = {
static const struct rtw89_pci_info rtw8852bt_pci_info = {
.gen_def = &rtw89_pci_gen_ax,
.isr_def = &rtw89_pci_isr_ax,
.txbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_mode = MAC_AX_RXBD_PKT,
@ -34,6 +35,8 @@ static const struct rtw89_pci_info rtw8852bt_pci_info = {
.rx_ring_eq_is_full = false,
.check_rx_tag = false,
.no_rxbd_fs = false,
.group_bd_addr = false,
.rpp_fmt_size = sizeof(struct rtw89_pci_rpp_fmt),
.init_cfg_reg = R_AX_PCIE_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN,
@ -63,6 +66,7 @@ static const struct rtw89_pci_info rtw8852bt_pci_info = {
.ltr_set = rtw89_pci_ltr_set,
.fill_txaddr_info = rtw89_pci_fill_txaddr_info,
.parse_rpp = rtw89_pci_parse_rpp,
.config_intr_mask = rtw89_pci_config_intr_mask,
.enable_intr = rtw89_pci_enable_intr,
.disable_intr = rtw89_pci_disable_intr,

View file

@ -30,6 +30,8 @@ static const struct usb_device_id rtw_8852bu_id_table[] = {
.driver_info = (kernel_ulong_t)&rtw89_8852bu_info },
{ USB_DEVICE_AND_INTERFACE_INFO(0x0db0, 0x6931, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&rtw89_8852bu_info },
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3327, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&rtw89_8852bu_info },
{ USB_DEVICE_AND_INTERFACE_INFO(0x3574, 0x6121, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&rtw89_8852bu_info },
{ USB_DEVICE_AND_INTERFACE_INFO(0x35bc, 0x0100, 0xff, 0xff, 0xff),

View file

@ -2962,6 +2962,7 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = {
.query_rxdesc = rtw89_core_query_rxdesc,
.fill_txdesc = rtw89_core_fill_txdesc_v1,
.fill_txdesc_fwcmd = rtw89_core_fill_txdesc_fwcmd_v1,
.get_ch_dma = rtw89_core_get_ch_dma,
.cfg_ctrl_path = rtw89_mac_cfg_ctrl_path_v1,
.mac_cfg_gnt = rtw89_mac_cfg_gnt_v1,
.stop_sch_tx = rtw89_mac_stop_sch_tx_v1,
@ -3043,6 +3044,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.support_ant_gain = true,
.support_tas = true,
.support_sar_by_ant = true,
.support_noise = false,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = true,
.rx_freq_frome_ie = false,
@ -3106,6 +3108,8 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.cfo_hw_comp = false,
.dcfo_comp = &rtw8852c_dcfo_comp,
.dcfo_comp_sft = 12,
.nhm_report = NULL,
.nhm_th = NULL,
.imr_info = &rtw8852c_imr_info,
.imr_dmac_table = NULL,
.imr_cmac_table = NULL,

View file

@ -20,6 +20,7 @@ static const struct rtw89_pci_bd_idx_addr rtw8852c_bd_idx_addr_low_power = {
static const struct rtw89_pci_info rtw8852c_pci_info = {
.gen_def = &rtw89_pci_gen_ax,
.isr_def = &rtw89_pci_isr_ax,
.txbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_mode = MAC_AX_RXBD_PKT,
@ -37,6 +38,8 @@ static const struct rtw89_pci_info rtw8852c_pci_info = {
.rx_ring_eq_is_full = false,
.check_rx_tag = false,
.no_rxbd_fs = false,
.group_bd_addr = false,
.rpp_fmt_size = sizeof(struct rtw89_pci_rpp_fmt),
.init_cfg_reg = R_AX_HAXI_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN_V1,
@ -64,6 +67,7 @@ static const struct rtw89_pci_info rtw8852c_pci_info = {
.ltr_set = rtw89_pci_ltr_set_v1,
.fill_txaddr_info = rtw89_pci_fill_txaddr_info_v1,
.parse_rpp = rtw89_pci_parse_rpp,
.config_intr_mask = rtw89_pci_config_intr_mask_v1,
.enable_intr = rtw89_pci_enable_intr_v1,
.disable_intr = rtw89_pci_disable_intr_v1,

View file

@ -2765,6 +2765,10 @@ static int rtw8922a_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
return 0;
}
static const struct rtw89_chanctx_listener rtw8922a_chanctx_listener = {
.callbacks[RTW89_CHANCTX_CALLBACK_TAS] = rtw89_tas_chanctx_cb,
};
#ifdef CONFIG_PM
static const struct wiphy_wowlan_support rtw_wowlan_stub_8922a = {
.flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT |
@ -2817,6 +2821,7 @@ static const struct rtw89_chip_ops rtw8922a_chip_ops = {
.query_rxdesc = rtw89_core_query_rxdesc_v2,
.fill_txdesc = rtw89_core_fill_txdesc_v2,
.fill_txdesc_fwcmd = rtw89_core_fill_txdesc_fwcmd_v2,
.get_ch_dma = rtw89_core_get_ch_dma,
.cfg_ctrl_path = rtw89_mac_cfg_ctrl_path_v2,
.mac_cfg_gnt = rtw89_mac_cfg_gnt_v2,
.stop_sch_tx = rtw89_mac_stop_sch_tx_v2,
@ -2875,6 +2880,7 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.nctl_post_table = NULL,
.dflt_parms = NULL, /* load parm from fw */
.rfe_parms_conf = NULL, /* load parm from fw */
.chanctx_listener = &rtw8922a_chanctx_listener,
.txpwr_factor_bb = 3,
.txpwr_factor_rf = 2,
.txpwr_factor_mac = 1,
@ -2894,8 +2900,9 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
BIT(NL80211_CHAN_WIDTH_160),
.support_unii4 = true,
.support_ant_gain = true,
.support_tas = false,
.support_tas = true,
.support_sar_by_ant = true,
.support_noise = false,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = false,
.rx_freq_frome_ie = false,
@ -2958,6 +2965,8 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.cfo_hw_comp = true,
.dcfo_comp = NULL,
.dcfo_comp_sft = 0,
.nhm_report = NULL,
.nhm_th = NULL,
.imr_info = NULL,
.imr_dmac_table = &rtw8922a_imr_dmac_table,
.imr_cmac_table = &rtw8922a_imr_cmac_table,

View file

@ -17,6 +17,7 @@ static const struct rtw89_pci_ssid_quirk rtw8922a_pci_ssid_quirks[] = {
static const struct rtw89_pci_info rtw8922a_pci_info = {
.gen_def = &rtw89_pci_gen_be,
.isr_def = &rtw89_pci_isr_be,
.txbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_mode = MAC_AX_RXBD_PKT,
@ -34,6 +35,8 @@ static const struct rtw89_pci_info rtw8922a_pci_info = {
.rx_ring_eq_is_full = true,
.check_rx_tag = true,
.no_rxbd_fs = true,
.group_bd_addr = false,
.rpp_fmt_size = sizeof(struct rtw89_pci_rpp_fmt),
.init_cfg_reg = R_BE_HAXI_INIT_CFG1,
.txhci_en_bit = B_BE_TXDMA_EN,
@ -61,6 +64,7 @@ static const struct rtw89_pci_info rtw8922a_pci_info = {
.ltr_set = rtw89_pci_ltr_set_v2,
.fill_txaddr_info = rtw89_pci_fill_txaddr_info_v1,
.parse_rpp = rtw89_pci_parse_rpp,
.config_intr_mask = rtw89_pci_config_intr_mask_v2,
.enable_intr = rtw89_pci_enable_intr_v2,
.disable_intr = rtw89_pci_disable_intr_v2,

15
sar.c
View file

@ -4,6 +4,7 @@
#include "acpi.h"
#include "debug.h"
#include "fw.h"
#include "phy.h"
#include "reg.h"
#include "sar.h"
@ -843,6 +844,20 @@ void rtw89_tas_chanctx_cb(struct rtw89_dev *rtwdev,
}
EXPORT_SYMBOL(rtw89_tas_chanctx_cb);
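/*
 * The TAS firmware timer is only driven on non-AX generations; AX chips
 * and disabled TAS configurations return early without touching the FW.
 */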
void rtw89_tas_fw_timer_enable(struct rtw89_dev *rtwdev, bool enable)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_tas_info *tas = &rtwdev->tas;
if (!tas->enable)
return;
if (chip->chip_gen == RTW89_CHIP_AX)
return;
rtw89_fw_h2c_rf_tas_trigger(rtwdev, enable);
}
void rtw89_sar_init(struct rtw89_dev *rtwdev)
{
rtw89_set_sar_from_acpi(rtwdev);

1
sar.h
View file

@ -37,6 +37,7 @@ void rtw89_tas_reset(struct rtw89_dev *rtwdev, bool force);
void rtw89_tas_scan(struct rtw89_dev *rtwdev, bool start);
void rtw89_tas_chanctx_cb(struct rtw89_dev *rtwdev,
enum rtw89_chanctx_state state);
void rtw89_tas_fw_timer_enable(struct rtw89_dev *rtwdev, bool enable);
void rtw89_sar_init(struct rtw89_dev *rtwdev);
void rtw89_sar_track(struct rtw89_dev *rtwdev);

5
ser.c
View file

@ -205,7 +205,6 @@ static void rtw89_ser_hdl_work(struct work_struct *work)
static int ser_send_msg(struct rtw89_ser *ser, u8 event)
{
struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
struct ser_msg *msg = NULL;
if (test_bit(RTW89_SER_DRV_STOP_RUN, ser->flags))
@ -221,7 +220,7 @@ static int ser_send_msg(struct rtw89_ser *ser, u8 event)
list_add(&msg->list, &ser->msg_q);
spin_unlock_irq(&ser->msg_q_lock);
ieee80211_queue_work(rtwdev->hw, &ser->ser_hdl_work);
schedule_work(&ser->ser_hdl_work);
return 0;
}
@ -502,7 +501,9 @@ static void ser_reset_trx_st_hdl(struct rtw89_ser *ser, u8 evt)
}
drv_stop_rx(ser);
wiphy_lock(wiphy);
drv_trx_reset(ser);
wiphy_unlock(wiphy);
/* wait m3 */
hal_send_m2_event(ser);

38
txrx.h
View file

@ -572,6 +572,7 @@ struct rtw89_phy_sts_ie00 {
} __packed;
#define RTW89_PHY_STS_IE00_W0_RPL GENMASK(15, 7)
#define RTW89_PHY_STS_IE00_W3_RX_PATH_EN GENMASK(31, 28)
struct rtw89_phy_sts_ie00_v2 {
__le32 w0;
@ -732,43 +733,6 @@ rtw89_core_get_qsel_mgmt(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request
return RTW89_TX_QSEL_B0_MGMT;
}
static inline u8 rtw89_core_get_ch_dma(struct rtw89_dev *rtwdev, u8 qsel)
{
switch (qsel) {
default:
rtw89_warn(rtwdev, "Cannot map qsel to dma: %d\n", qsel);
fallthrough;
case RTW89_TX_QSEL_BE_0:
case RTW89_TX_QSEL_BE_1:
case RTW89_TX_QSEL_BE_2:
case RTW89_TX_QSEL_BE_3:
return RTW89_TXCH_ACH0;
case RTW89_TX_QSEL_BK_0:
case RTW89_TX_QSEL_BK_1:
case RTW89_TX_QSEL_BK_2:
case RTW89_TX_QSEL_BK_3:
return RTW89_TXCH_ACH1;
case RTW89_TX_QSEL_VI_0:
case RTW89_TX_QSEL_VI_1:
case RTW89_TX_QSEL_VI_2:
case RTW89_TX_QSEL_VI_3:
return RTW89_TXCH_ACH2;
case RTW89_TX_QSEL_VO_0:
case RTW89_TX_QSEL_VO_1:
case RTW89_TX_QSEL_VO_2:
case RTW89_TX_QSEL_VO_3:
return RTW89_TXCH_ACH3;
case RTW89_TX_QSEL_B0_MGMT:
return RTW89_TXCH_CH8;
case RTW89_TX_QSEL_B0_HI:
return RTW89_TXCH_CH9;
case RTW89_TX_QSEL_B1_MGMT:
return RTW89_TXCH_CH10;
case RTW89_TX_QSEL_B1_HI:
return RTW89_TXCH_CH11;
}
}
static inline u8 rtw89_core_get_tid_indicate(struct rtw89_dev *rtwdev, u8 tid)
{
switch (tid) {

79
wow.c
View file

@ -99,13 +99,26 @@ static int rtw89_rx_pn_to_iv(struct rtw89_dev *rtwdev,
ieee80211_get_key_rx_seq(key, 0, &seq);
/* seq.ccmp.pn[] is BE order array */
pn = u64_encode_bits(seq.ccmp.pn[0], RTW89_KEY_PN_5) |
u64_encode_bits(seq.ccmp.pn[1], RTW89_KEY_PN_4) |
u64_encode_bits(seq.ccmp.pn[2], RTW89_KEY_PN_3) |
u64_encode_bits(seq.ccmp.pn[3], RTW89_KEY_PN_2) |
u64_encode_bits(seq.ccmp.pn[4], RTW89_KEY_PN_1) |
u64_encode_bits(seq.ccmp.pn[5], RTW89_KEY_PN_0);
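/* TKIP carries its IV as an iv32/iv16 pair, while the CCMP/GCMP family
 * uses a 48-bit packet number stored as a big-endian byte array.
 */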
switch (key->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
pn = u64_encode_bits(seq.tkip.iv32, RTW89_KEY_TKIP_PN_IV32) |
u64_encode_bits(seq.tkip.iv16, RTW89_KEY_TKIP_PN_IV16);
break;
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
case WLAN_CIPHER_SUITE_GCMP_256:
/* seq.ccmp.pn[] is BE order array */
pn = u64_encode_bits(seq.ccmp.pn[0], RTW89_KEY_PN_5) |
u64_encode_bits(seq.ccmp.pn[1], RTW89_KEY_PN_4) |
u64_encode_bits(seq.ccmp.pn[2], RTW89_KEY_PN_3) |
u64_encode_bits(seq.ccmp.pn[3], RTW89_KEY_PN_2) |
u64_encode_bits(seq.ccmp.pn[4], RTW89_KEY_PN_1) |
u64_encode_bits(seq.ccmp.pn[5], RTW89_KEY_PN_0);
break;
default:
return -EINVAL;
}
err = _pn_to_iv(rtwdev, key, iv, pn, key->keyidx);
if (err)
@ -177,13 +190,26 @@ static int rtw89_rx_iv_to_pn(struct rtw89_dev *rtwdev,
if (err)
return err;
/* seq.ccmp.pn[] is BE order array */
seq.ccmp.pn[0] = u64_get_bits(pn, RTW89_KEY_PN_5);
seq.ccmp.pn[1] = u64_get_bits(pn, RTW89_KEY_PN_4);
seq.ccmp.pn[2] = u64_get_bits(pn, RTW89_KEY_PN_3);
seq.ccmp.pn[3] = u64_get_bits(pn, RTW89_KEY_PN_2);
seq.ccmp.pn[4] = u64_get_bits(pn, RTW89_KEY_PN_1);
seq.ccmp.pn[5] = u64_get_bits(pn, RTW89_KEY_PN_0);
switch (key->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
seq.tkip.iv32 = u64_get_bits(pn, RTW89_KEY_TKIP_PN_IV32);
seq.tkip.iv16 = u64_get_bits(pn, RTW89_KEY_TKIP_PN_IV16);
break;
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
case WLAN_CIPHER_SUITE_GCMP_256:
/* seq.ccmp.pn[] is BE order array */
seq.ccmp.pn[0] = u64_get_bits(pn, RTW89_KEY_PN_5);
seq.ccmp.pn[1] = u64_get_bits(pn, RTW89_KEY_PN_4);
seq.ccmp.pn[2] = u64_get_bits(pn, RTW89_KEY_PN_3);
seq.ccmp.pn[3] = u64_get_bits(pn, RTW89_KEY_PN_2);
seq.ccmp.pn[4] = u64_get_bits(pn, RTW89_KEY_PN_1);
seq.ccmp.pn[5] = u64_get_bits(pn, RTW89_KEY_PN_0);
break;
default:
return -EINVAL;
}
ieee80211_set_key_rx_seq(key, 0, &seq);
rtw89_debug(rtwdev, RTW89_DBG_WOW, "%s key %d iv-%*ph to pn-%*ph\n",
@ -285,6 +311,11 @@ static void rtw89_wow_get_key_info_iter(struct ieee80211_hw *hw,
switch (key->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
if (sta)
memcpy(gtk_info->txmickey,
key->key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY,
sizeof(gtk_info->txmickey));
fallthrough;
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
@ -348,10 +379,27 @@ static void rtw89_wow_set_key_info_iter(struct ieee80211_hw *hw,
struct rtw89_wow_aoac_report *aoac_rpt = &rtw_wow->aoac_rpt;
struct rtw89_set_key_info_iter_data *iter_data = data;
bool update_tx_key_info = iter_data->rx_ready;
u8 tmp[RTW89_MIC_KEY_LEN];
int ret;
switch (key->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
/*
* TX MIC KEY and RX MIC KEY are opposite in FW,
* so swap them before sending to mac80211.
*/
if (!sta && update_tx_key_info && aoac_rpt->rekey_ok &&
!iter_data->tkip_gtk_swapped) {
memcpy(tmp, &aoac_rpt->gtk[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
RTW89_MIC_KEY_LEN);
memcpy(&aoac_rpt->gtk[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
&aoac_rpt->gtk[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
RTW89_MIC_KEY_LEN);
memcpy(&aoac_rpt->gtk[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
tmp, RTW89_MIC_KEY_LEN);
iter_data->tkip_gtk_swapped = true;
}
fallthrough;
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
@ -642,7 +690,8 @@ static void rtw89_wow_update_key_info(struct rtw89_dev *rtwdev, bool rx_ready)
struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
struct rtw89_wow_aoac_report *aoac_rpt = &rtw_wow->aoac_rpt;
struct rtw89_set_key_info_iter_data data = {.error = false,
.rx_ready = rx_ready};
.rx_ready = rx_ready,
.tkip_gtk_swapped = false};
struct ieee80211_bss_conf *bss_conf;
struct ieee80211_key_conf *key;

6
wow.h
View file

@ -5,6 +5,9 @@
#ifndef __RTW89_WOW_H__
#define __RTW89_WOW_H__
#define RTW89_KEY_TKIP_PN_IV16 GENMASK_ULL(15, 0)
#define RTW89_KEY_TKIP_PN_IV32 GENMASK_ULL(47, 16)
#define RTW89_KEY_PN_0 GENMASK_ULL(7, 0)
#define RTW89_KEY_PN_1 GENMASK_ULL(15, 8)
#define RTW89_KEY_PN_2 GENMASK_ULL(23, 16)
@ -25,6 +28,8 @@
#define RTW89_WOW_SYMBOL_CHK_PTK BIT(0)
#define RTW89_WOW_SYMBOL_CHK_GTK BIT(1)
#define RTW89_MIC_KEY_LEN 8
enum rtw89_wake_reason {
RTW89_WOW_RSN_RX_PTK_REKEY = 0x1,
RTW89_WOW_RSN_RX_GTK_REKEY = 0x2,
@ -73,6 +78,7 @@ struct rtw89_set_key_info_iter_data {
u32 igtk_cipher;
bool rx_ready;
bool error;
bool tkip_gtk_swapped;
};
static inline int rtw89_wow_get_sec_hdr_len(struct rtw89_dev *rtwdev)