mirror of
				https://github.com/torvalds/linux.git
				synced 2025-11-04 02:30:34 +02:00 
			
		
		
		
	wifi: iwlwifi: remove bc_table_dword transport config
There's really no point in configuring this; it's just a question of hardware capability. Remove it. Signed-off-by: Johannes Berg <johannes.berg@intel.com> Reviewed-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com> Signed-off-by: Miri Korenblit <miriam.rachel.korenblit@intel.com> Link: https://patch.msgid.link/20250503224232.6af4ea001226.I693f72a7c3a76e44f9ef2cefd62d606ad100a734@changeid
This commit is contained in:
		
							parent
							
								
									995727b113
								
							
						
					
					
						commit
						6570ea2278
					
				
					 6 changed files with 2 additions and 14 deletions
				
			
		| 
						 | 
				
			
			@ -401,8 +401,6 @@ struct iwl_dump_sanitize_ops {
 | 
			
		|||
 * @n_no_reclaim_cmds: # of commands in list
 | 
			
		||||
 * @rx_buf_size: RX buffer size needed for A-MSDUs
 | 
			
		||||
 *	if unset 4k will be the RX buffer size
 | 
			
		||||
 * @bc_table_dword: set to true if the BC table expects the byte count to be
 | 
			
		||||
 *	in DWORD (as opposed to bytes)
 | 
			
		||||
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 | 
			
		||||
 * @command_groups: array of command groups, each member is an array of the
 | 
			
		||||
 *	commands in the group; for debugging only
 | 
			
		||||
| 
						 | 
				
			
			@ -423,7 +421,6 @@ struct iwl_trans_config {
 | 
			
		|||
	unsigned int n_no_reclaim_cmds;
 | 
			
		||||
 | 
			
		||||
	enum iwl_amsdu_size rx_buf_size;
 | 
			
		||||
	bool bc_table_dword;
 | 
			
		||||
	bool scd_set_active;
 | 
			
		||||
	const struct iwl_hcmd_arr *command_groups;
 | 
			
		||||
	int command_groups_size;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -1445,8 +1445,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 | 
			
		|||
	trans_cfg.rx_buf_size = iwl_amsdu_size_to_rxb_size();
 | 
			
		||||
 | 
			
		||||
	trans->wide_cmd_header = true;
 | 
			
		||||
	trans_cfg.bc_table_dword =
 | 
			
		||||
		mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210;
 | 
			
		||||
 | 
			
		||||
	trans_cfg.command_groups = iwl_mvm_groups;
 | 
			
		||||
	trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -289,7 +289,6 @@ enum iwl_pcie_imr_status {
 | 
			
		|||
/**
 | 
			
		||||
 * struct iwl_pcie_txqs - TX queues data
 | 
			
		||||
 *
 | 
			
		||||
 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 | 
			
		||||
 * @page_offs: offset from skb->cb to mac header page pointer
 | 
			
		||||
 * @dev_cmd_offs: offset from skb->cb to iwl_device_tx_cmd pointer
 | 
			
		||||
 * @queue_used: bit mask of used queues
 | 
			
		||||
| 
						 | 
				
			
			@ -315,7 +314,6 @@ struct iwl_pcie_txqs {
 | 
			
		|||
	struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
 | 
			
		||||
	struct dma_pool *bc_pool;
 | 
			
		||||
	size_t bc_tbl_size;
 | 
			
		||||
	bool bc_table_dword;
 | 
			
		||||
	u8 page_offs;
 | 
			
		||||
	u8 dev_cmd_offs;
 | 
			
		||||
	struct iwl_tso_hdr_page __percpu *tso_hdr_page;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -1993,7 +1993,6 @@ void iwl_trans_pcie_configure(struct iwl_trans *trans,
 | 
			
		|||
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
 | 
			
		||||
		trans_pcie->supported_dma_mask = DMA_BIT_MASK(11);
 | 
			
		||||
 | 
			
		||||
	trans_pcie->txqs.bc_table_dword = trans_cfg->bc_table_dword;
 | 
			
		||||
	trans_pcie->scd_set_active = trans_cfg->scd_set_active;
 | 
			
		||||
 | 
			
		||||
	trans->command_groups = trans_cfg->command_groups;
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -561,7 +561,6 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
 | 
			
		|||
					  struct iwl_txq *txq, u16 byte_cnt,
 | 
			
		||||
					  int num_tbs)
 | 
			
		||||
{
 | 
			
		||||
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 | 
			
		||||
	int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
 | 
			
		||||
	u8 filled_tfd_size, num_fetch_chunks;
 | 
			
		||||
	u16 len = byte_cnt;
 | 
			
		||||
| 
						 | 
				
			
			@ -585,16 +584,12 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
 | 
			
		|||
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
 | 
			
		||||
		struct iwl_gen3_bc_tbl_entry *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
 | 
			
		||||
 | 
			
		||||
		/* Starting from AX210, the HW expects bytes */
 | 
			
		||||
		WARN_ON(trans_pcie->txqs.bc_table_dword);
 | 
			
		||||
		WARN_ON(len > 0x3FFF);
 | 
			
		||||
		bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
 | 
			
		||||
		scd_bc_tbl_gen3[idx].tfd_offset = bc_ent;
 | 
			
		||||
	} else {
 | 
			
		||||
		struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
 | 
			
		||||
 | 
			
		||||
		/* Before AX210, the HW expects DW */
 | 
			
		||||
		WARN_ON(!trans_pcie->txqs.bc_table_dword);
 | 
			
		||||
		len = DIV_ROUND_UP(len, 4);
 | 
			
		||||
		WARN_ON(len > 0xFFF);
 | 
			
		||||
		bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
			@ -2095,7 +2095,8 @@ static void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
 | 
			
		|||
		len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
 | 
			
		||||
		break;
 | 
			
		||||
	}
 | 
			
		||||
	if (trans_pcie->txqs.bc_table_dword)
 | 
			
		||||
 | 
			
		||||
	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
 | 
			
		||||
		len = DIV_ROUND_UP(len, 4);
 | 
			
		||||
 | 
			
		||||
	if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
		Loading…
	
		Reference in a new issue